| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCAmelCase_ : Any = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self,cache_dir : Optional[str] = None ):
self.extract_dir = (
os.path.join(cache_dir,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
self.extractor = Extractor
def lowerCamelCase_ ( self : List[str],__A : str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
_lowerCamelCase : str = os.path.abspath(__A )
return os.path.join(self.extract_dir,hash_url_to_filename(__A ) )
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : bool ):
return force_extract or (
not os.path.isfile(__A ) and not (os.path.isdir(__A ) and os.listdir(__A ))
)
def lowerCamelCase_ ( self : Tuple,__A : str,__A : bool = False ):
_lowerCamelCase : Tuple = self.extractor.infer_extractor_format(__A )
if not extractor_format:
return input_path
_lowerCamelCase : int = self._get_output_path(__A )
if self._do_extract(__A,__A ):
self.extractor.extract(__A,__A,__A )
return output_path
class UpperCAmelCase__ ( A ):
@classmethod
@abstractmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : Union[Path, str],**__A : int ):
...
@staticmethod
@abstractmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
...
class UpperCAmelCase__ ( A , A ):
lowerCAmelCase_ = []
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : int ):
with open(__A,"rb" ) as f:
return f.read(__A )
@classmethod
def lowerCamelCase_ ( cls : int,__A : Union[Path, str],__A : bytes = b"" ):
if not magic_number:
magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
try:
_lowerCamelCase : Any = cls.read_magic_number(__A,__A )
except OSError:
return False
return any(magic_number.startswith(__A ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( A ):
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : Union[Path, str],**__A : Optional[int] ):
return tarfile.is_tarfile(__A )
@staticmethod
def lowerCamelCase_ ( __A : Optional[Any],__A : Dict ):
def resolved(__A : str ) -> str:
return os.path.realpath(os.path.abspath(__A ) )
def badpath(__A : str,__A : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__A,__A ) ).startswith(__A )
def badlink(__A : int,__A : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_lowerCamelCase : Union[str, Any] = resolved(os.path.join(__A,os.path.dirname(info.name ) ) )
return badpath(info.linkname,base=__A )
_lowerCamelCase : Optional[Any] = resolved(__A )
for finfo in members:
if badpath(finfo.name,__A ):
logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(__A,__A ):
logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(__A,__A ):
logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
os.makedirs(__A,exist_ok=__A )
_lowerCamelCase : int = tarfile.open(__A )
tar_file.extractall(__A,members=TarExtractor.safemembers(__A,__A ) )
tar_file.close()
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x1F\x8B']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with gzip.open(__A,"rb" ) as gzip_file:
with open(__A,"wb" ) as extracted_file:
shutil.copyfileobj(__A,__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : Union[Path, str],__A : bytes = b"" ):
if super().is_extractable(__A,magic_number=__A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__A,"rb" ) as fp:
_lowerCamelCase : Optional[Any] = _EndRecData(__A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_lowerCamelCase : List[Any] = fp.read(__A ) # CD is where we expect it to be
if len(__A ) == sizeCentralDir:
_lowerCamelCase : List[Any] = struct.unpack(__A,__A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
os.makedirs(__A,exist_ok=__A )
with zipfile.ZipFile(__A,"r" ) as zip_file:
zip_file.extractall(__A )
zip_file.close()
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with lzma.open(__A ) as compressed_file:
with open(__A,"wb" ) as extracted_file:
shutil.copyfileobj(__A,__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(__A,exist_ok=__A )
_lowerCamelCase : int = rarfile.RarFile(__A )
rf.extractall(__A )
rf.close()
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x28\xb5\x2F\xFD']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_lowerCamelCase : Optional[Any] = zstd.ZstdDecompressor()
with open(__A,"rb" ) as ifh, open(__A,"wb" ) as ofh:
dctx.copy_stream(__A,__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x42\x5A\x68']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with bz2.open(__A,"rb" ) as compressed_file:
with open(__A,"wb" ) as extracted_file:
shutil.copyfileobj(__A,__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import py7zr
os.makedirs(__A,exist_ok=__A )
with py7zr.SevenZipFile(__A,"r" ) as archive:
archive.extractall(__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x04\x22\x4D\x18']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lz4.frame
with lz4.frame.open(__A,"rb" ) as compressed_file:
with open(__A,"wb" ) as extracted_file:
shutil.copyfileobj(__A,__A )
class UpperCAmelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
lowerCAmelCase_ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
return max(
len(extractor_magic_number )
for extractor in cls.extractors.values()
if issubclass(extractor,MagicNumberBaseExtractor )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(__A,magic_number_length=__A )
except OSError:
return b""
@classmethod
def lowerCamelCase_ ( cls : List[str],__A : Union[Path, str],__A : bool = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead.",category=__A,)
_lowerCamelCase : Optional[Any] = cls.infer_extractor_format(__A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCamelCase_ ( cls : Optional[int],__A : Union[Path, str] ): # <Added version="2.4.0"/>
_lowerCamelCase : Optional[int] = cls._get_magic_number_max_length()
_lowerCamelCase : Union[str, Any] = cls._read_magic_number(__A,__A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__A,magic_number=__A ):
return extractor_format
@classmethod
def lowerCamelCase_ ( cls : List[Any],__A : Union[Path, str],__A : Union[Path, str],__A : Optional[str] = None,__A : Optional[BaseExtractor] = "deprecated",):
os.makedirs(os.path.dirname(__A ),exist_ok=__A )
# Prevent parallel extractions
_lowerCamelCase : Union[str, Any] = str(Path(__A ).with_suffix(".lock" ) )
with FileLock(__A ):
shutil.rmtree(__A,ignore_errors=__A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__A,__A ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead.",category=__A,)
_lowerCamelCase : Dict = extractor if extractor != "deprecated" else extractor_format
else:
_lowerCamelCase : str = cls.extractors[extractor_format]
return extractor.extract(__A,__A )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0.",category=__A,)
for extractor in cls.extractors.values():
if extractor.is_extractable(__A ):
return extractor.extract(__A,__A )
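A minimal usage sketch of the extraction manager above, assuming the obfuscated class names map back to their `datasets` originals (`ExtractManager` for the cache manager, `Extractor` for the format dispatcher); the paths are hypothetical:

# Hedged sketch: assumes the first class above is datasets' ExtractManager.
from datasets.utils.extract import ExtractManager

manager = ExtractManager(cache_dir="/tmp/extract_cache")  # hypothetical cache dir
# extract() hashes the input path to name the output directory, infers the
# archive format from its magic number, and returns the input path unchanged
# when the file is not a recognized archive.
print(manager.extract("/tmp/data/archive.tar.gz"))  # hypothetical archive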
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( out_string : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A )
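A short usage sketch for the tokenizer above, assuming it is the `GPTSw3Tokenizer` shipped in `transformers` (checkpoint ids taken from the vocab map above):

# Hedged sketch: assumes the class above is transformers' GPTSw3Tokenizer.
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
# preprocess_text() strips non-printing characters, maps exotic whitespace to
# plain spaces, and applies NFC normalization before SentencePiece encoding.
print(tokenizer("Träd är fina")["input_ids"])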
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Tuple = {
'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=2_5_6,
        d_latents=1_2_8_0,
        d_model=7_6_8,
        num_blocks=1,
        num_self_attends_per_block=2_6,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=2_6_2,
        max_position_embeddings=2_0_4_8,
        image_size=5_6,
        train_size=[3_6_8, 4_9_6],
        num_frames=1_6,
        audio_samples_per_frame=1_9_2_0,
        samples_per_patch=1_6,
        output_shape=[1, 1_6, 2_2_4, 2_2_4],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
@property
def lowerCamelCase_ ( self : List[Any] ):
if self.task == "multiple-choice":
_lowerCamelCase : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCamelCase : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
return 1e-4
def lowerCamelCase_ ( self : List[Any],__A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],__A : int = -1,__A : int = -1,__A : int = -1,__A : bool = False,__A : Optional[TensorType] = None,__A : int = 3,__A : int = 4_0,__A : int = 4_0,):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(__A,__A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCamelCase : Dict = compute_effective_axis_dimension(
__A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCamelCase : str = preprocessor.num_special_tokens_to_add(__A )
_lowerCamelCase : Any = compute_effective_axis_dimension(
__A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
_lowerCamelCase : int = [" ".join(["a"] ) * seq_length] * batch_size
_lowerCamelCase : int = dict(preprocessor(__A,return_tensors=__A ) )
_lowerCamelCase : int = inputs.pop("input_ids" )
return inputs
elif isinstance(__A,__A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_lowerCamelCase : int = compute_effective_axis_dimension(__A,fixed_dimension=OnnxConfig.default_fixed_batch )
_lowerCamelCase : Tuple = self._generate_dummy_images(__A,__A,__A,__A )
_lowerCamelCase : Any = dict(preprocessor(images=__A,return_tensors=__A ) )
_lowerCamelCase : Optional[int] = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." ) | 11 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
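A worked example of the merge above; `SortedLinkedList` sorts on construction, so merging any two lists yields one fully sorted list:

# Example: merging (3, 1) and (2, 0) yields 0 -> 1 -> 2 -> 3.
merged = merge_lists(SortedLinkedList((3, 1)), SortedLinkedList((2, 0)))
assert str(merged) == "0 -> 1 -> 2 -> 3"
assert len(merged) == 4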
'''simple docstring'''
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """simple docstring"""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
    print(f'''{target} was {not_str}found in {sequence}''')
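A couple of quick checks for the recursive search above (the input list must already be sorted):

assert binary_search([1, 3, 5, 7, 9], 7) is True
assert binary_search([1, 3, 5, 7, 9], 4) is False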
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
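The `_LazyModule` pattern defers the heavy submodule import until an attribute is first accessed. A simplified, hedged sketch of the idea (not the actual transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal stand-in that imports a submodule on first attribute access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")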
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        """simple docstring"""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
    )

    def test_f2(x, y):
        """simple docstring"""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f'''{local_min.score()}'''
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f'''{local_min.score()}'''
    )
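For intuition about the acceptance rule above: a worsening move is accepted with probability e^(change / current_temp), so the same bad move is accepted often at high temperature and almost never near the threshold temperature:

# Worked example of the Metropolis-style acceptance probability used above.
import math

change = -5.0
print(math.e ** (change / 100.0))  # ~0.951 at the default start temperature
print(math.e ** (change / 1.0))    # ~0.007 near the default threshold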
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) )
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A )
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """simple docstring"""
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # assumption: flax "/"-joined keys are re-joined with "." for the PyTorch state dict
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
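The sharding loop above compares each weight's byte size against the parsed shard limit; both helpers are imported at the top of the script. A small worked example:

import torch
from transformers.modeling_utils import dtype_byte_size
from transformers.utils.hub import convert_file_size_to_int

weight = torch.zeros(1024, 1024, dtype=torch.bfloat16)
weight_size = weight.numel() * dtype_byte_size(weight.dtype)
print(weight_size)                       # 2097152 bytes (2 bytes per bf16 value)
print(convert_file_size_to_int("10GB"))  # 10**10 bytes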
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """simple docstring"""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """simple docstring"""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """simple docstring"""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
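A quick round trip through the A1Z26 cipher above (lowercase letters map to 1–26 via `ord(c) - 96`):

assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"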
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f'''{solution() = }''')
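The counting above relies on the shortest surface path over a cuboid with sides a ≤ b ≤ M being sqrt((a + b)^2 + M^2); the classic 6×5×3 cuboid illustrates the integer case:

from math import sqrt

a, b, M = 3, 5, 6
assert sqrt((a + b) ** 2 + M**2) == 10.0  # integer shortest path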
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """simple docstring"""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
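A worked example of the sliding-window update above: windows of size 2 in [1, 4, 2, 10] sum to 5, 6, and 12, so the maximum is 12:

assert max_sum_in_array([1, 4, 2, 10], 2) == 12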
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
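Example conversions, which agree with Python's built-in `bin()`:

assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(10) == bin(10) == "0b1010"
assert decimal_to_binary(-37) == bin(-37) == "-0b100101"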
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
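A hedged migration sketch: since the deprecated class above is just a subclass of `CLIPImageProcessor`, new code can instantiate the processor directly:

from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")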
'''simple docstring'''
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
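# Example invocation (illustrative, not part of the original script; the script
# name, repo and dataset ids below are placeholders, and the flags come from
# PretokenizationArguments):
#   python pretokenizing.py --tokenizer_dir <tokenizer-repo> \
#       --dataset_name <dataset-id> --tokenized_data_repo <output-repo>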
'''simple docstring'''
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
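# NOTE (illustrative sketch, not part of the original module): the function above
# swaps two independently chosen indices per step. The classical Fisher-Yates
# formulation walks the list once from the end and is unbiased;
# `classical_fisher_yates` is a hypothetical helper name.
def classical_fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # partner drawn from the not-yet-fixed prefix
        data[i], data[j] = data[j], data[i]
    return data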
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AA") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
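    # Spot checks (illustrative, not part of the original file): the values
    # follow from the base-26 expansion above (A=1, ..., Z=26, AA=27).
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("Z") == 26
    assert excel_title_to_column("AA") == 27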
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Discretize the given alpha_t_bar function into a beta schedule over `num_diffusion_timesteps`."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        # UnCLIP does not rescale model inputs; kept for scheduler API compatibility.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                # _get_variance already returned the standard deviation in this mode
                pass
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
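# Usage sketch (illustrative, not part of the original module; because of the
# relative imports above this file is meant to be imported from within its
# package, not run directly):
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(10)
#     sample = torch.randn(2, 4)
#     for t in scheduler.timesteps:
#         model_output = torch.randn_like(sample)  # stand-in for a denoising model
#         sample = scheduler.step(model_output, t, sample).prev_sample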
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
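# Usage sketch (illustrative, not part of the original module): the fast
# tokenizer is normally loaded from the pretrained map above rather than
# constructed by hand.
#
#     tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tok.build_inputs_with_special_tokens([5, 6, 7])        # [bos] + ids + [eos]
#     segs = tok.create_token_type_ids_from_sequences([5, 6, 7])   # all zeros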
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed the python, numpy and torch RNGs for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls):
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        # `.to()` is applied tensor-by-tensor; integer tensors keep their dtype.
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
'''simple docstring'''
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
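    # A tiny worked example (illustrative, not part of the original file):
    # nearest neighbour of each query point in a three-point dataset;
    # `sample_dataset` and `queries` are hypothetical names.
    sample_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    queries = np.array([[0.1, 0.1], [1.9, 1.9]])
    print(similarity_search(sample_dataset, queries))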
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """simple docstring"""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
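# Example invocation (illustrative, not part of the original script; the script
# name and paths are placeholders):
#   python convert_opt_checkpoint.py --fairseq_path /path/to/model.pt \
#       --pytorch_dump_folder_path ./opt-converted --hf_config facebook/opt-350m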
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table of `number` up to `number_of_terms` lines."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
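    # Spot check (illustrative, not part of the original file): the first line
    # of the 3x table.
    assert multiplication_table(number=3, number_of_terms=1) == "3 * 1 = 3"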
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """simple docstring"""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Fetch the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """simple docstring"""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Dummy iterable dataset of random length, used by the shard tests below.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str],__A : Union[str, Any]=False,__A : Dict=True ):
_lowerCamelCase : Tuple = [
BatchSamplerShard(__A,2,__A,split_batches=__A,even_batches=__A )
for i in range(2 )
]
_lowerCamelCase : Dict = [list(__A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__A ) for shard in batch_sampler_shards],[len(__A ) for e in expected] )
self.assertListEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
# Check the shards when the dataset is a round multiple of total batch size.
_lowerCamelCase : int = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Any = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : int = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Any = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Any = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Tuple = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Tuple = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Dict = [[], []]
self.check_batch_sampler_shards(__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
# Check the shards when the dataset is a round multiple of batch size.
_lowerCamelCase : Any = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : int = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : Any = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : List[Any] = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Tuple = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : List[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : int = [[], []]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
def lowerCamelCase_ ( self : Dict ):
# Check the shards when the dataset is a round multiple of total batch size.
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : str = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : List[str] = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : List[str] = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[str] = [[], []]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
def lowerCamelCase_ ( self : Tuple ):
# Check the shards when the dataset is a round multiple of batch size.
_lowerCamelCase : Any = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
_lowerCamelCase : List[Any] = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : str = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
_lowerCamelCase : Optional[int] = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : str = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : int = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
_lowerCamelCase : List[str] = [BatchSamplerShard(__A,2,__A,even_batches=__A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ),3 )
self.assertEqual(len(batch_sampler_shards[1] ),2 )
self.assertListEqual(list(batch_sampler_shards[0] ),[[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ),[[3, 4], [9, 1_0, 1_1]] )
def lowerCamelCase_ ( self : int,__A : List[str],__A : int,__A : List[str],__A : Optional[int]=False,__A : List[str]=2,__A : Optional[Any]=False ):
random.seed(__A )
_lowerCamelCase : str = list(__A )
_lowerCamelCase : List[str] = [
IterableDatasetShard(
__A,batch_size=__A,drop_last=__A,num_processes=__A,process_index=__A,split_batches=__A,)
for i in range(__A )
]
_lowerCamelCase : Dict = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__A )
iterable_dataset_lists.append(list(__A ) )
_lowerCamelCase : Union[str, Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowerCamelCase : Any = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__A ),len(__A ) )
self.assertTrue(len(__A ) % shard_batch_size == 0 )
_lowerCamelCase : Optional[int] = []
for idx in range(0,len(__A ),__A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__A ) < len(__A ):
reference += reference
self.assertListEqual(__A,reference[: len(__A )] )
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3) | 11 |
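For context, a minimal standalone sketch of the mid-epoch resume pattern these tests exercise; this assumes a recent accelerate release that exports skip_first_batches at the top level:
# Hedged usage sketch, not part of the test suite above.
from torch.utils.data import DataLoader
from accelerate import skip_first_batches
dataloader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(dataloader, num_batches=2)  # drop the first two batches
print([t.tolist() for t in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]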
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index_a: int, index_b: int, direction: int) -> None:
    """simple docstring"""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 1 |
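Worth noting: bitonic sort only works when the number of elements is a power of two. A quick non-interactive sketch of the functions above:
example = [12, 42, -21, 1, 2, 86, 98, 34]  # length 8, a power of two
bitonic_sort(example, 0, len(example), 1)  # direction 1 = ascending
print(example)  # [-21, 1, 2, 12, 34, 42, 86, 98]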
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | 11 |
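As a usage sketch: the class above mirrors transformers' DecisionTransformerConfig, so the usual config-then-model pattern should apply (hedged; the model here is randomly initialized, not pretrained):
from transformers import DecisionTransformerConfig, DecisionTransformerModel
config = DecisionTransformerConfig(state_dim=17, act_dim=4)
model = DecisionTransformerModel(config)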
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
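A quick sanity check of the helper above (illustrative values): n = 2 gives sqrt(9)/2 + 1/2 = 2 and log2(2) = 1, an integer; n = 12 gives log2(4) = 2; n = 5 does not land on a power of two.
print(check_partition_perfect(2))   # True
print(check_partition_perfect(12))  # True
print(check_partition_perfect(5))   # False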
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """simple docstring"""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
                f'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level
def _get_library_name() -> str:
    """simple docstring"""
    return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
    """simple docstring"""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """simple docstring"""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    """simple docstring"""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """simple docstring"""
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """simple docstring"""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """simple docstring"""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """simple docstring"""
    return set_verbosity(INFO)
def set_verbosity_warning():
    """simple docstring"""
    return set_verbosity(WARNING)
def set_verbosity_debug():
    """simple docstring"""
    return set_verbosity(DEBUG)
def set_verbosity_error():
    """simple docstring"""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    """simple docstring"""
    _configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """simple docstring"""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)
def reset_format() -> None:
    """simple docstring"""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """simple docstring"""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """simple docstring"""
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """simple docstring"""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar():
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = False
hf_hub_utils.disable_progress_bars() | 11 |
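A short usage sketch of the helpers above, following how the transformers logging utilities are normally driven (hedged; where output lands depends on handler configuration):
logger = get_logger(__name__)
set_verbosity_info()                    # equivalent to set_verbosity(log_levels["info"])
logger.info("now visible at INFO level")
disable_progress_bar()                  # silence tqdm bars globally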
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs) | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """simple docstring"""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
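Example usage of the trial-division routine above:
print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]
print(prime_factors(97))   # [97], since 97 is prime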
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0] | 11 |
'''simple docstring'''
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """simple docstring"""
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
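A small verification sketch: the positive root of 10 - x**2 is sqrt(10) ≈ 3.1623, and the loop exits once the bracket is narrower than 0.01, so the returned midpoint lands within that tolerance:
root = bisection(0, 6)
assert abs(root - 10 ** 0.5) < 0.01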
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = KandinskyVaaPriorPipeline
lowerCAmelCase_ = ['prompt']
lowerCAmelCase_ = ['prompt', 'negative_prompt']
lowerCAmelCase_ = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
lowerCAmelCase_ = False
@property
def lowerCamelCase_ ( self : List[Any] ):
return 3_2
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return 3_2
@property
def lowerCamelCase_ ( self : Optional[int] ):
return self.time_input_dim
@property
def lowerCamelCase_ ( self : Tuple ):
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self : Optional[int] ):
return 1_0_0
@property
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCamelCase_ ( self : Tuple ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=self.text_embedder_hidden_size,projection_dim=self.text_embedder_hidden_size,intermediate_size=3_7,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,)
return CLIPTextModelWithProjection(__A )
@property
def lowerCamelCase_ ( self : Any ):
torch.manual_seed(0 )
_lowerCamelCase : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
_lowerCamelCase : Any = PriorTransformer(**__A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_lowerCamelCase : Optional[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size,image_size=2_2_4,projection_dim=self.text_embedder_hidden_size,intermediate_size=3_7,num_attention_heads=4,num_channels=3,num_hidden_layers=5,patch_size=1_4,)
_lowerCamelCase : Any = CLIPVisionModelWithProjection(__A )
return model
@property
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = CLIPImageProcessor(
crop_size=2_2_4,do_center_crop=__A,do_normalize=__A,do_resize=__A,image_mean=[0.48145466, 0.4578275, 0.40821073],image_std=[0.26862954, 0.26130258, 0.27577711],resample=3,size=2_2_4,)
return image_processor
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[Any] = self.dummy_prior
_lowerCamelCase : Any = self.dummy_image_encoder
_lowerCamelCase : Any = self.dummy_text_encoder
_lowerCamelCase : List[str] = self.dummy_tokenizer
_lowerCamelCase : List[str] = self.dummy_image_processor
_lowerCamelCase : Optional[Any] = UnCLIPScheduler(
variance_type="fixed_small_log",prediction_type="sample",num_train_timesteps=1_0_0_0,clip_sample=__A,clip_sample_range=10.0,)
_lowerCamelCase : Optional[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCamelCase_ ( self : int,__A : List[Any],__A : Tuple=0 ):
if str(__A ).startswith("mps" ):
_lowerCamelCase : Any = torch.manual_seed(__A )
else:
_lowerCamelCase : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : int = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Tuple = "cpu"
_lowerCamelCase : Any = self.get_dummy_components()
_lowerCamelCase : int = self.pipeline_class(**__A )
_lowerCamelCase : str = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : int = pipe(**self.get_dummy_inputs(__A ) )
_lowerCamelCase : Any = output.image_embeds
_lowerCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(__A ),return_dict=__A,)[0]
_lowerCamelCase : Any = image[0, -1_0:]
_lowerCamelCase : List[str] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
_lowerCamelCase : str = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Tuple = torch_device == "cpu"
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Dict = False
self._test_inference_batch_single_identical(
test_max_difference=__A,relax_max_difference=__A,test_mean_pixel_difference=__A,)
@skip_mps
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = torch_device == "cpu"
_lowerCamelCase : Any = False
self._test_attention_slicing_forward_pass(
test_max_difference=__A,test_mean_pixel_difference=__A,) | 11 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def get_hash(example):
    """simple docstring"""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """simple docstring"""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """simple docstring"""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """simple docstring"""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """simple docstring"""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """simple docstring"""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """simple docstring"""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """simple docstring"""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """simple docstring"""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """simple docstring"""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """simple docstring"""
    if not check_uniques(example, uniques):
        return False
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """simple docstring"""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 1 |
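For illustration, a hedged toy run of the per-example heuristics above (char_token_ratio is skipped because it needs the module-level tokenizer):
toy = {"content": "def add(a, b):\n    return a + b\n"}
print(get_hash(toy))         # deterministic md5 of the whitespace-stripped content
print(line_stats(toy))       # {'line_mean': 15.0, 'line_max': 16}
print(has_no_keywords(toy))  # {'has_no_keywords': False} because of 'def '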
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : List[str] = 3
class FailedTestError(RuntimeError):
    pass
def gen(shards: List[str]):
    """simple docstring"""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    """simple docstring"""
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f'local_size {local_size} != expected_local_size {expected_local_size}')
if __name__ == "__main__":
main() | 11 |
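This script is intended to be launched once per process; a hedged example invocation (the script name and world size are illustrative, not from the source):
# torchrun --nproc_per_node=2 run_torch_distributed.py --streaming True
# Each rank then verifies that split_dataset_by_node handed it its expected share of the 4 * 3 = 12 items.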
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum):
    train = 'train'
    dev = 'dev'
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
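A hedged construction sketch for the legacy dataset class above (the checkpoint and data_dir are placeholders; SQuAD-format json files must exist in data_dir):
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")
train_dataset = SquadDataset(args, tokenizer, mode="train")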
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'mgp-str'
def __init__( self : Union[str, Any],__A : Optional[int]=[3_2, 1_2_8],__A : Tuple=4,__A : Any=3,__A : int=2_7,__A : Union[str, Any]=3_8,__A : List[Any]=5_0_2_5_7,__A : List[Any]=3_0_5_2_2,__A : Optional[int]=7_6_8,__A : List[str]=1_2,__A : List[str]=1_2,__A : Optional[Any]=4.0,__A : List[Any]=True,__A : List[Any]=False,__A : Tuple=1e-5,__A : Any=0.0,__A : List[Any]=0.0,__A : List[Any]=0.0,__A : Optional[Any]=False,__A : List[Any]=0.02,**__A : Optional[Any],):
super().__init__(**__A )
_lowerCamelCase : Any = image_size
_lowerCamelCase : Optional[int] = patch_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : List[Any] = max_token_length
_lowerCamelCase : str = num_character_labels
_lowerCamelCase : Dict = num_bpe_labels
_lowerCamelCase : Optional[Any] = num_wordpiece_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : List[str] = mlp_ratio
_lowerCamelCase : List[str] = distilled
_lowerCamelCase : Tuple = layer_norm_eps
_lowerCamelCase : str = drop_rate
_lowerCamelCase : Optional[int] = qkv_bias
_lowerCamelCase : Dict = attn_drop_rate
_lowerCamelCase : Union[str, Any] = drop_path_rate
_lowerCamelCase : List[Any] = output_aa_attentions
_lowerCamelCase : Tuple = initializer_range | 11 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """simple docstring"""
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
"""simple docstring"""
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main() | 11 |
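Quick checks of the two implementations above:
assert euclidean_gcd(252, 105) == 21
assert euclidean_gcd_recursive(252, 105) == 21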
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
    def preprocess_text( self : Dict,text : str ):
        text = self.non_printing_characters_re.sub("",text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC",text )
        return text
    def lowerCamelCase_ ( self : Union[str, Any],text : str,**kwargs : Optional[int] ):
        text = self.preprocess_text(text )
        return self.sp_model.encode(text,out_type=str )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
    def lowerCamelCase_ ( out_string : str ):
return out_string
    def lowerCamelCase_ ( self : str,tokens : List[str] ):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def lowerCamelCase_ ( self : Any ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def lowerCamelCase_ ( self : Optional[Any],save_directory : str,filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file,"wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def lowerCamelCase_ ( self : Optional[int],text : Union[str, List[str]],return_tensors : Union[str, bool] = False ):
        if isinstance(text,str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'codegen'
lowerCAmelCase_ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self : Any,vocab_size : Optional[int]=5_0_4_0_0,n_positions : Any=2_0_4_8,n_ctx : str=2_0_4_8,n_embd : int=4_0_9_6,n_layer : Any=2_8,n_head : str=1_6,rotary_dim : Optional[Any]=6_4,n_inner : str=None,activation_function : Optional[int]="gelu_new",resid_pdrop : Optional[int]=0.0,embd_pdrop : List[Any]=0.0,attn_pdrop : str=0.0,layer_norm_epsilon : Union[str, Any]=1e-5,initializer_range : Tuple=0.02,use_cache : str=True,bos_token_id : Any=5_0_2_5_6,eos_token_id : Optional[Any]=5_0_2_5_6,tie_word_embeddings : int=False,**kwargs : Optional[int],):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id,eos_token_id=eos_token_id,tie_word_embeddings=tie_word_embeddings,**kwargs )
class UpperCAmelCase__ ( A ):
    def __init__( self : Any,config : PretrainedConfig,task : str = "default",patching_specs : List[PatchingSpec] = None,use_past : bool = False,):
        super().__init__(config,task=task,patching_specs=patching_specs,use_past=use_past )
if not getattr(self._config,"pad_token_id",__A ):
# TODO: how to do that better?
_lowerCamelCase : Any = 0
@property
    def inputs( self : Optional[int] ):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs,direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
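    # Sketch of what the axis maps above encode: {0: "batch", 1: "sequence"} marks
    # those ONNX tensor axes as dynamic, so the exported graph accepts arbitrary
    # batch sizes and sequence lengths at inference time.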
@property
    def num_layers( self : List[str] ):
return self._config.n_layer
@property
    def num_attention_heads( self : List[str] ):
return self._config.n_head
    def generate_dummy_inputs( self : Optional[Any],tokenizer : PreTrainedTokenizer,batch_size : int = -1,seq_length : int = -1,is_pair : bool = False,framework : Optional[TensorType] = None,):
        common_inputs = super(OnnxConfigWithPast,self ).generate_dummy_inputs(
            tokenizer,batch_size=batch_size,seq_length=seq_length,is_pair=is_pair,framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch,past_key_values_length,dtype=mask_dtype )],dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self : Optional[int] ):
        return 1_3
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self : Optional[int],__A : Iterable[int] ):
        self.head: Node | None = None
        for i in sorted(__A,reverse=True ):
            self.head = Node(i,self.head )
    def __iter__( self : str ):
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self : str ):
        return sum(1 for _ in self )
    def __str__( self : str ):
        return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one : SortedLinkedList , sll_two : SortedLinkedList ):
    """simple docstring"""
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
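# Quick sketch of the expected behaviour with the test data defined above:
# str(merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even)))
# == '-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10'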
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__ ( A ):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
    def lowerCamelCase_ ( self : Tuple ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feat_extract,"feature_size" ) )
        self.assertTrue(hasattr(feat_extract,"sampling_rate" ) )
        self.assertTrue(hasattr(feat_extract,"padding_value" ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs,processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs},tensor_type="np" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
    def lowerCamelCase_ ( self : List[Any] ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs},tensor_type="pt" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
    def lowerCamelCase_ ( self : Dict ):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs},tensor_type="tf" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding( self : int,numpify : List[Any]=False ):
def _inputs_have_equal_length(__A : Tuple ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
return False
return True
        def _inputs_are_equal(input_a : Union[str, Any],input_b : int ):
            if len(input_a ) != len(input_b ):
                return False
            for input_slice_a, input_slice_b in zip(input_a,input_b ):
                if not np.allclose(np.asarray(input_slice_a ),np.asarray(input_slice_b ),atol=1e-3 ):
return False
return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_lowerCamelCase : Tuple = feat_extract.pad(__A,padding=__A )
_lowerCamelCase : Dict = input_a[input_name]
_lowerCamelCase : int = feat_extract.pad(__A,padding="longest" )
_lowerCamelCase : int = input_a[input_name]
_lowerCamelCase : int = feat_extract.pad(__A,padding="max_length",max_length=len(speech_inputs[-1] ) )
_lowerCamelCase : Tuple = input_a[input_name]
_lowerCamelCase : List[Any] = feat_extract.pad(__A,padding="longest",return_tensors="np" )
_lowerCamelCase : Any = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__A ):
feat_extract.pad(__A,padding="max_length" )[input_name]
_lowerCamelCase : Dict = feat_extract.pad(
__A,padding="max_length",max_length=__A,return_tensors="np" )
_lowerCamelCase : Optional[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__A ) )
self.assertTrue(_inputs_have_equal_length(__A ) )
self.assertTrue(_inputs_have_equal_length(__A ) )
self.assertTrue(_inputs_are_equal(__A,__A ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_lowerCamelCase : str = feat_extract.pad(__A,pad_to_multiple_of=1_0 )
_lowerCamelCase : Union[str, Any] = input_a[input_name]
_lowerCamelCase : str = feat_extract.pad(__A,padding="longest",pad_to_multiple_of=1_0 )
_lowerCamelCase : Dict = input_a[input_name]
_lowerCamelCase : Optional[Any] = feat_extract.pad(
__A,padding="max_length",pad_to_multiple_of=1_0,max_length=__A )
_lowerCamelCase : Tuple = input_a[input_name]
_lowerCamelCase : List[Any] = feat_extract.pad(
__A,padding="max_length",pad_to_multiple_of=1_0,max_length=__A,return_tensors="np",)
_lowerCamelCase : List[str] = input_a[input_name]
        self.assertTrue(all(len(x ) % 1_0 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__A,__A ) )
_lowerCamelCase : Any = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
        self.assertTrue(all(len(x ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2],(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_lowerCamelCase : List[Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
    def _check_truncation( self : int,numpify : Dict=False ):
def _inputs_have_equal_length(__A : int ):
            length = len(input[0] )
            for input_slice in input[1:]:
                if len(input_slice ) != length:
return False
return True
        def _inputs_are_equal(input_a : Dict,input_b : Optional[int] ):
            if len(input_a ) != len(input_b ):
                return False
            for input_slice_a, input_slice_b in zip(input_a,input_b ):
                if not np.allclose(np.asarray(input_slice_a ),np.asarray(input_slice_b ),atol=1e-3 ):
return False
return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_lowerCamelCase : Union[str, Any] = feat_extract.pad(
__A,padding="max_length",max_length=len(speech_inputs[0] ),truncation=__A )
_lowerCamelCase : List[Any] = input_a[input_name]
_lowerCamelCase : List[str] = feat_extract.pad(__A,padding="max_length",max_length=len(speech_inputs[0] ) )
_lowerCamelCase : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__A ) )
self.assertFalse(_inputs_have_equal_length(__A ) )
# truncate to smallest with np
_lowerCamelCase : Dict = feat_extract.pad(
__A,padding="max_length",max_length=len(speech_inputs[0] ),return_tensors="np",truncation=__A,)
_lowerCamelCase : Optional[Any] = input_a[input_name]
_lowerCamelCase : Union[str, Any] = feat_extract.pad(
__A,padding="max_length",max_length=len(speech_inputs[0] ),return_tensors="np" )
_lowerCamelCase : Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__A ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__A ) )
# truncate to middle
_lowerCamelCase : List[Any] = feat_extract.pad(
__A,padding="max_length",max_length=len(speech_inputs[1] ),truncation=__A,return_tensors="np",)
_lowerCamelCase : Tuple = input_a[input_name]
_lowerCamelCase : Union[str, Any] = feat_extract.pad(
__A,padding="max_length",max_length=len(speech_inputs[1] ),truncation=__A )
_lowerCamelCase : str = input_a[input_name]
_lowerCamelCase : List[str] = feat_extract.pad(
__A,padding="max_length",max_length=len(speech_inputs[1] ),return_tensors="np" )
_lowerCamelCase : str = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__A ) )
self.assertTrue(_inputs_have_equal_length(__A ) )
self.assertTrue(_inputs_are_equal(__A,__A ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(__A ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__A ):
feat_extract.pad(__A,truncation=__A )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__A ):
feat_extract.pad(__A,padding="longest",truncation=__A )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__A ):
feat_extract.pad(__A,padding="longest",truncation=__A )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__A ):
feat_extract.pad(__A,padding="max_length",truncation=__A )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_lowerCamelCase : Optional[int] = 1_2
_lowerCamelCase : Dict = feat_extract.pad(
__A,padding="max_length",max_length=len(speech_inputs[0] ),pad_to_multiple_of=__A,truncation=__A,)
_lowerCamelCase : str = input_a[input_name]
_lowerCamelCase : Any = feat_extract.pad(
__A,padding="max_length",max_length=len(speech_inputs[0] ),pad_to_multiple_of=__A,)
_lowerCamelCase : int = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_lowerCamelCase : Union[str, Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_lowerCamelCase : Tuple = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__A ) )
self.assertFalse(_inputs_have_equal_length(__A ) )
    def lowerCamelCase_ ( self : Any ):
        self._check_padding(numpify=False )
    def lowerCamelCase_ ( self : str ):
        self._check_padding(numpify=True )
    def lowerCamelCase_ ( self : Optional[Any] ):
        self._check_truncation(numpify=False )
    def lowerCamelCase_ ( self : Any ):
        self._check_truncation(numpify=True )
@require_torch
    def lowerCamelCase_ ( self : Optional[Any] ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features,padding="longest",return_tensors="np" )[input_name]
        input_pt = feat_extract.pad(processed_features,padding="longest",return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
    def lowerCamelCase_ ( self : Optional[int] ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        input_np = feat_extract.pad(processed_features,padding="longest",return_tensors="np" )[input_name]
        input_tf = feat_extract.pad(processed_features,padding="longest",return_tensors="tf" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
    def lowerCamelCase_ ( self : int ):
        feat_extract_dict = self.feat_extract_dict
        feat_extract_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        processed = feat_extract.pad(processed_features,padding="longest",return_tensors="np" )
        self.assertIn("attention_mask",processed )
        self.assertListEqual(list(processed.attention_mask.shape ),list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(),input_lengths )
    def lowerCamelCase_ ( self : List[str] ):
        feat_extract_dict = self.feat_extract_dict
        feat_extract_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        processed_pad = feat_extract.pad(
            processed_features,padding="max_length",max_length=max_length,truncation=True,return_tensors="np" )
        self.assertIn("attention_mask",processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ),[processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(),[max_length for x in speech_inputs] )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
def miller_rabin( n : int , allow_probable : bool = False ):
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
    bounds = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d , s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
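# A couple of sanity examples (values chosen here for illustration):
# miller_rabin(97) -> True (97 is prime), miller_rabin(100) -> False, and any
# n above 3317044064679887385961981 requires allow_probable=True.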
def test_miller_rabin( ):
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
    test_miller_rabin()
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester( unittest.TestCase ):
    def __init__( self : Union[str, Any],parent : Union[str, Any],batch_size : Optional[int]=1_3,seq_length : Optional[Any]=7,is_training : int=True,use_attention_mask : Any=True,use_token_type_ids : Dict=True,use_labels : List[Any]=True,vocab_size : Optional[Any]=9_9,hidden_size : List[str]=3_2,num_hidden_layers : int=5,num_attention_heads : str=4,intermediate_size : int=3_7,hidden_act : Optional[Any]="gelu",hidden_dropout_prob : Union[str, Any]=0.1,attention_probs_dropout_prob : str=0.1,max_position_embeddings : Any=5_1_2,type_vocab_size : Any=1_6,type_sequence_label_size : Optional[int]=2,initializer_range : int=0.02,num_choices : Optional[int]=4,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self : Optional[int] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=False,initializer_range=self.initializer_range,)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self : Any ):
        self.model_tester = FlaxAlbertModelTester(self )
@slow
    def lowerCamelCase_ ( self : Union[str, Any] ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
    def lowerCamelCase_ ( self : Tuple ):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        input_ids = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids,attention_mask=attention_mask )[0]
        expected_shape = (1, 1_1, 7_6_8)
        self.assertEqual(output.shape,expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],expected_slice,atol=1e-4 ) )
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
    def __init__( self : int,args : Any=None,**kwargs : Optional[Any] ):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",FutureWarning,)
        super().__init__(args=args,**kwargs )
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple : Any , flax_tensor : Tuple ):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
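# Background for the reshapes above: Flax Dense kernels are stored as
# (in_features, out_features) while torch.nn.Linear weights are
# (out_features, in_features), hence the .T for plain linears; expert kernels
# carry a leading expert axis, hence the (0, 2, 1) permute.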
def get_key_and_tensorstore_dict( layer : Optional[int] , checkpoint_info : int , switch_checkpoint_path : Any ):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block( current_block : Tuple , save_path : Optional[Any] ):
    """simple docstring"""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/" , "." )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly( switch_checkpoint_path : Dict , dump_path : Tuple , max_shard_size : Optional[Any] , dtype : Tuple , weights_name : str = WEIGHTS_NAME ):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
    checkpoint_info = flatten_dict(checkpoint_info , sep="/" )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key , raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ) , raw_weights )
        key = "/".join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            shard_file = os.path.join(
                dump_path , weights_name.replace(".bin" , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
            rename_and_save_block(current_block , shard_file )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    shard_file = os.path.join(dump_path , weights_name.replace(".bin" , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
    rename_and_save_block(current_block , shard_file )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            ".bin" , f'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check( ):
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    tokenizer = T5Tokenizer.from_pretrained("t5-small" )
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text , return_tensors="pt" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_( state_dict : Optional[Any] ):
    """simple docstring"""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys( s_dict : Dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb : List[str] ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
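# The Linear built above shares its weight tensor with the embedding, i.e. the
# LM head stays tied to the input embeddings (bias-free weight tying).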
def _download( url : str , root : str ):
    """simple docstring"""
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f'{download_target} exists and is not a regular file' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , "rb" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , "rb" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path : Union[str, Any] , pytorch_dump_folder_path : int ):
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f' but all the following weights are missing {missing}' )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
from math import sqrt
def solution( limit : int = 1000000 ):
    """simple docstring"""
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
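# Sketch of the semantics: `solution` returns the smallest maximum side length M
# for which the number of cuboids (sides <= M) with an integer shortest surface
# path first exceeds `limit` (this is the Project Euler 86 counting argument).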
if __name__ == "__main__":
    print(f'''{solution() = }''')
'''simple docstring'''
import sys
import turtle
def get_mid( pa : tuple[float, float] , pb : tuple[float, float] ):
    """simple docstring"""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle( vertex1 : tuple[float, float] , vertex2 : tuple[float, float] , vertex3 : tuple[float, float] , depth : int , ):
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertex1[0] , vertex1[1] )
    my_pen.down()
    my_pen.goto(vertex2[0] , vertex2[1] )
    my_pen.goto(vertex3[0] , vertex3[1] )
    my_pen.goto(vertex1[0] , vertex1[1] )
    if depth == 0:
        return
    triangle(vertex1 , get_mid(vertex1 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
    triangle(vertex2 , get_mid(vertex1 , vertex2 ) , get_mid(vertex2 , vertex3 ) , depth - 1 )
    triangle(vertex3 , get_mid(vertex3 , vertex2 ) , get_mid(vertex1 , vertex3 ) , depth - 1 )
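# Each call outlines one triangle, then recurses into the three corner
# sub-triangles formed by the side midpoints (the Sierpinski construction);
# depth bounds the recursion, and depth == 0 draws a single triangle.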
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
'''simple docstring'''
def A_ ( num : int ):
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary : list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
    doctest.testmod()
'''simple docstring'''
def actual_power( a : int , b : int ):
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a : int , b : int ):
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
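# Quick sanity sketch: power(2, 3) == 8 and power(2, -3) == 0.125. Negative
# exponents route through 1 / actual_power, and int(b / 2) truncates toward
# zero, so the recursion also terminates for b < 0.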
if __name__ == "__main__":
    print(power(-2, -3))
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n    >>> repo = "openai/shap-e-img2img"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n    >>> image = load_image(image_url).convert("RGB")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n    ```\n'
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images : Union[List[PIL.Image.Image], np.ndarray]
class UpperCAmelCase__ ( A ):
    def __init__( self : str,prior : PriorTransformer,image_encoder : CLIPVisionModel,image_processor : CLIPImageProcessor,scheduler : HeunDiscreteScheduler,renderer : ShapERenderer,):
        super().__init__()
        self.register_modules(
            prior=prior,image_encoder=image_encoder,image_processor=image_processor,scheduler=scheduler,renderer=renderer,)
    def prepare_latents( self : Optional[int],shape : str,dtype : Tuple,device : Any,generator : Optional[int],latents : Tuple,scheduler : Union[str, Any] ):
        if latents is None:
            latents = randn_tensor(shape,generator=generator,device=device,dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self : List[str],gpu_id : List[Any]=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(f'cuda:{gpu_id}' )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model,device )
@property
    def _execution_device( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image( self : Tuple,image : int,device : Any,num_images_per_prompt : str,do_classifier_free_guidance : int,):
        if isinstance(image,list ) and isinstance(image[0],torch.Tensor ):
            image = torch.cat(image,axis=0 ) if image[0].ndim == 4 else torch.stack(image,axis=0 )
        if not isinstance(image,torch.Tensor ):
            image = self.image_processor(image,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype,device=device )
        image_embeds = self.image_encoder(image )["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt,dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
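    # Note on the concatenation above: stacking [negative, positive] embeddings
    # lets one prior forward pass serve both halves of classifier-free guidance;
    # __call__ splits them apart again with .chunk(2).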
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
        for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
        return ShapEPipelineOutput(images=__A )
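A minimal usage sketch for a pipeline of this shape. It assumes the class above corresponds to diffusers' ShapEImg2ImgPipeline; the checkpoint id and the example image URL rest on that assumption and are otherwise placeholders.

import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import export_to_gif, load_image

# Load the image-to-3D pipeline in half precision (checkpoint id assumed).
pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

# Any RGB image works; this URL is assumed from the diffusers documentation.
image = load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png")

# `frame_size` maps onto the render size threaded through to the renderer above;
# `.images[0]` is the list of rendered frames for the first input image.
frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64).images[0]
export_to_gif(frames, "corgi_3d.gif")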
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'detr'
lowerCAmelCase_ = ['past_key_values']
lowerCAmelCase_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int],__A : Tuple=True,__A : List[Any]=None,__A : List[str]=3,__A : Any=1_0_0,__A : Union[str, Any]=6,__A : Union[str, Any]=2_0_4_8,__A : str=8,__A : Dict=6,__A : Tuple=2_0_4_8,__A : Optional[int]=8,__A : int=0.0,__A : Union[str, Any]=0.0,__A : Optional[Any]=True,__A : Union[str, Any]="relu",__A : str=2_5_6,__A : int=0.1,__A : List[str]=0.0,__A : Union[str, Any]=0.0,__A : int=0.02,__A : Union[str, Any]=1.0,__A : str=False,__A : int="sine",__A : List[str]="resnet50",__A : List[Any]=True,__A : Tuple=False,__A : Dict=1,__A : Optional[Any]=5,__A : Optional[int]=2,__A : List[str]=1,__A : int=1,__A : str=5,__A : List[Any]=2,__A : Union[str, Any]=0.1,**__A : Union[str, Any],):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_lowerCamelCase : int = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__A,__A ):
_lowerCamelCase : Union[str, Any] = backbone_config.get("model_type" )
_lowerCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Optional[int] = config_class.from_dict(__A )
# set timm attributes to None
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = None, None, None
_lowerCamelCase : Tuple = use_timm_backbone
_lowerCamelCase : Dict = backbone_config
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : Union[str, Any] = num_queries
_lowerCamelCase : Optional[Any] = d_model
_lowerCamelCase : Any = encoder_ffn_dim
_lowerCamelCase : Optional[Any] = encoder_layers
_lowerCamelCase : int = encoder_attention_heads
_lowerCamelCase : Union[str, Any] = decoder_ffn_dim
_lowerCamelCase : Dict = decoder_layers
_lowerCamelCase : Dict = decoder_attention_heads
_lowerCamelCase : Optional[Any] = dropout
_lowerCamelCase : Tuple = attention_dropout
_lowerCamelCase : Union[str, Any] = activation_dropout
_lowerCamelCase : List[Any] = activation_function
_lowerCamelCase : str = init_std
_lowerCamelCase : Dict = init_xavier_std
_lowerCamelCase : Tuple = encoder_layerdrop
_lowerCamelCase : List[str] = decoder_layerdrop
_lowerCamelCase : Optional[int] = encoder_layers
_lowerCamelCase : List[str] = auxiliary_loss
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : Union[str, Any] = backbone
_lowerCamelCase : str = use_pretrained_backbone
_lowerCamelCase : Union[str, Any] = dilation
# Hungarian matcher
_lowerCamelCase : Any = class_cost
_lowerCamelCase : int = bbox_cost
_lowerCamelCase : Tuple = giou_cost
# Loss coefficients
_lowerCamelCase : Union[str, Any] = mask_loss_coefficient
_lowerCamelCase : Tuple = dice_loss_coefficient
_lowerCamelCase : str = bbox_loss_coefficient
_lowerCamelCase : Dict = giou_loss_coefficient
_lowerCamelCase : Any = eos_coefficient
super().__init__(is_encoder_decoder=__A,**__A )
@property
def lowerCamelCase_ ( self : Tuple ):
return self.encoder_attention_heads
@property
def lowerCamelCase_ ( self : str ):
return self.d_model
@classmethod
def lowerCamelCase_ ( cls : str,__A : PretrainedConfig,**__A : Optional[Any] ):
return cls(backbone_config=__A,**__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_lowerCamelCase : List[Any] = self.backbone_config.to_dict()
_lowerCamelCase : List[str] = self.__class__.model_type
return output
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return 1e-5
@property
def lowerCamelCase_ ( self : Optional[int] ):
        return 1_2
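A short sketch of how a config of this shape is typically used, assuming the class mirrors transformers' DetrConfig (the attribute names below follow that API):

from transformers import DetrConfig

# Build a smaller-than-default DETR config; unspecified fields keep the
# defaults from __init__ above.
config = DetrConfig(num_queries=50, d_model=128, encoder_layers=2, decoder_layers=2)

# The attribute_map aliases resolve to the underlying fields.
assert config.hidden_size == config.d_model
assert config.num_attention_heads == config.encoder_attention_heads

# Round-trip through a plain dict, which to_dict()/from_dict() support.
restored = DetrConfig.from_dict(config.to_dict())
assert restored.num_queries == 50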
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
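Note that the loop above swaps two independently chosen random positions on each pass, which is a random-transposition shuffle rather than the textbook Fisher-Yates and is not guaranteed to produce a uniform permutation. A sketch of the classical variant for comparison:

def classic_fisher_yates(data: list) -> list:
    # Walk from the end, swapping each slot with a uniformly chosen index at
    # or before it; this yields an unbiased permutation in a single pass.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data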
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
_lowerCamelCase : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowerCamelCase : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_lowerCamelCase : List[str] = np.concatenate(_lowerCAmelCase , axis=0 )
_lowerCamelCase : Tuple = np.array(_lowerCAmelCase ).astype(np.floataa ) / 2_5_5.0
_lowerCamelCase : Optional[Any] = image.transpose(0 , 3 , 1 , 2 )
_lowerCamelCase : List[str] = 2.0 * image - 1.0
_lowerCamelCase : List[Any] = torch.from_numpy(_lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
_lowerCamelCase : List[Any] = torch.cat(_lowerCAmelCase , dim=0 )
return image
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any]=0.9_9_9_5 ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , np.ndarray ):
_lowerCamelCase : Tuple = True
_lowerCamelCase : Any = va.device
_lowerCamelCase : Tuple = va.cpu().numpy()
_lowerCamelCase : Tuple = va.cpu().numpy()
_lowerCamelCase : List[Any] = np.sum(va * va / (np.linalg.norm(_lowerCAmelCase ) * np.linalg.norm(_lowerCAmelCase )) )
if np.abs(_lowerCAmelCase ) > DOT_THRESHOLD:
_lowerCamelCase : List[str] = (1 - t) * va + t * va
else:
_lowerCamelCase : List[Any] = np.arccos(_lowerCAmelCase )
_lowerCamelCase : Tuple = np.sin(_lowerCAmelCase )
_lowerCamelCase : List[str] = theta_a * t
_lowerCamelCase : List[Any] = np.sin(_lowerCAmelCase )
_lowerCamelCase : List[Any] = np.sin(theta_a - theta_t ) / sin_theta_a
_lowerCamelCase : int = sin_theta_t / sin_theta_a
_lowerCamelCase : List[Any] = sa * va + sa * va
if inputs_are_torch:
_lowerCamelCase : int = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
return va
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = F.normalize(_lowerCAmelCase , dim=-1 )
_lowerCamelCase : Union[str, Any] = F.normalize(_lowerCAmelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ):
"""simple docstring"""
for param in model.parameters():
_lowerCamelCase : Tuple = value
class UpperCAmelCase__ ( A ):
def __init__( self : List[Any],__A : AutoencoderKL,__A : CLIPTextModel,__A : CLIPModel,__A : CLIPTokenizer,__A : UNetaDConditionModel,__A : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],__A : CLIPFeatureExtractor,__A : List[str]=None,__A : Dict=None,__A : int=None,):
super().__init__()
self.register_modules(
vae=__A,text_encoder=__A,clip_model=__A,tokenizer=__A,unet=__A,scheduler=__A,feature_extractor=__A,coca_model=__A,coca_tokenizer=__A,coca_transform=__A,)
_lowerCamelCase : Any = (
feature_extractor.size
if isinstance(feature_extractor.size,__A )
else feature_extractor.size["shortest_edge"]
)
_lowerCamelCase : str = transforms.Normalize(mean=feature_extractor.image_mean,std=feature_extractor.image_std )
set_requires_grad(self.text_encoder,__A )
set_requires_grad(self.clip_model,__A )
def lowerCamelCase_ ( self : List[str],__A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCamelCase : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__A )
def lowerCamelCase_ ( self : Optional[Any] ):
self.enable_attention_slicing(__A )
def lowerCamelCase_ ( self : int ):
set_requires_grad(self.vae,__A )
def lowerCamelCase_ ( self : Optional[int] ):
set_requires_grad(self.vae,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
set_requires_grad(self.unet,__A )
def lowerCamelCase_ ( self : List[Any] ):
set_requires_grad(self.unet,__A )
def lowerCamelCase_ ( self : Any,__A : Tuple,__A : Tuple,__A : List[str] ):
# get the original timestep using init_timestep
_lowerCamelCase : Union[str, Any] = min(int(num_inference_steps * strength ),__A )
_lowerCamelCase : int = max(num_inference_steps - init_timestep,0 )
_lowerCamelCase : Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self : Optional[Any],__A : Tuple,__A : Optional[int],__A : Union[str, Any],__A : Dict,__A : List[Any],__A : Optional[Any]=None ):
if not isinstance(__A,torch.Tensor ):
raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(__A )}' )
_lowerCamelCase : List[str] = image.to(device=__A,dtype=__A )
if isinstance(__A,__A ):
_lowerCamelCase : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__A )
]
_lowerCamelCase : List[str] = torch.cat(__A,dim=0 )
else:
_lowerCamelCase : Union[str, Any] = self.vae.encode(__A ).latent_dist.sample(__A )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCamelCase : int = 0.18215 * init_latents
_lowerCamelCase : int = init_latents.repeat_interleave(__A,dim=0 )
_lowerCamelCase : List[Any] = randn_tensor(init_latents.shape,generator=__A,device=__A,dtype=__A )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__A,__A,__A )
_lowerCamelCase : Union[str, Any] = init_latents
return latents
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int] ):
_lowerCamelCase : str = self.coca_transform(__A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCamelCase : Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device,dtype=self.coca_model.dtype ) )
_lowerCamelCase : int = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>","" ).rstrip(" .," )
def lowerCamelCase_ ( self : Any,__A : Tuple,__A : Tuple ):
_lowerCamelCase : Optional[int] = self.feature_extractor.preprocess(__A )
_lowerCamelCase : Optional[Any] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCamelCase : Dict = self.clip_model.get_image_features(__A )
_lowerCamelCase : Any = image_embeddings_clip / image_embeddings_clip.norm(p=2,dim=-1,keepdim=__A )
_lowerCamelCase : List[str] = image_embeddings_clip.repeat_interleave(__A,dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def lowerCamelCase_ ( self : str,__A : List[str],__A : Union[str, Any],__A : List[Any],__A : Dict,__A : List[Any],__A : Union[str, Any],__A : Union[str, Any],):
_lowerCamelCase : Dict = latents.detach().requires_grad_()
_lowerCamelCase : Dict = self.scheduler.scale_model_input(__A,__A )
# predict the noise residual
_lowerCamelCase : int = self.unet(__A,__A,encoder_hidden_states=__A ).sample
if isinstance(self.scheduler,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCamelCase : List[str] = self.scheduler.alphas_cumprod[timestep]
_lowerCamelCase : Union[str, Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCamelCase : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCamelCase : str = torch.sqrt(__A )
_lowerCamelCase : List[Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler,__A ):
_lowerCamelCase : List[str] = self.scheduler.sigmas[index]
_lowerCamelCase : Optional[Any] = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCamelCase : List[Any] = 1 / 0.18215 * sample
_lowerCamelCase : Any = self.vae.decode(__A ).sample
_lowerCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0,1 )
_lowerCamelCase : str = transforms.Resize(self.feature_extractor_size )(__A )
_lowerCamelCase : Optional[int] = self.normalize(__A ).to(latents.dtype )
_lowerCamelCase : Any = self.clip_model.get_image_features(__A )
_lowerCamelCase : Union[str, Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2,dim=-1,keepdim=__A )
_lowerCamelCase : Union[str, Any] = spherical_dist_loss(__A,__A ).mean() * clip_guidance_scale
_lowerCamelCase : str = -torch.autograd.grad(__A,__A )[0]
if isinstance(self.scheduler,__A ):
_lowerCamelCase : List[str] = latents.detach() + grads * (sigma**2)
_lowerCamelCase : Optional[Any] = noise_pred_original
else:
_lowerCamelCase : List[Any] = noise_pred_original - torch.sqrt(__A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int],__A : Union[torch.FloatTensor, PIL.Image.Image],__A : Union[torch.FloatTensor, PIL.Image.Image],__A : Optional[str] = None,__A : Optional[str] = None,__A : Optional[int] = 5_1_2,__A : Optional[int] = 5_1_2,__A : float = 0.6,__A : Optional[int] = 5_0,__A : Optional[float] = 7.5,__A : Optional[int] = 1,__A : float = 0.0,__A : Optional[float] = 1_0_0,__A : Optional[torch.Generator] = None,__A : Optional[str] = "pil",__A : bool = True,__A : float = 0.8,__A : float = 0.1,__A : float = 0.1,):
if isinstance(__A,__A ) and len(__A ) != batch_size:
raise ValueError(f'You have passed {batch_size} batch_size, but only {len(__A )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(__A,torch.Generator ) and batch_size > 1:
_lowerCamelCase : List[str] = [generator] + [None] * (batch_size - 1)
_lowerCamelCase : Dict = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
_lowerCamelCase : str = [x[0] for x in coca_is_none if x[1]]
_lowerCamelCase : List[Any] = ", ".join(__A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__A ):
raise ValueError(
f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                    f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
_lowerCamelCase : List[str] = self.get_image_description(__A )
if style_prompt is None:
if len(__A ):
raise ValueError(
f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
_lowerCamelCase : List[str] = self.get_image_description(__A )
# get prompt text embeddings for content and style
_lowerCamelCase : str = self.tokenizer(
__A,padding="max_length",max_length=self.tokenizer.model_max_length,truncation=__A,return_tensors="pt",)
_lowerCamelCase : Optional[int] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCamelCase : Optional[int] = self.tokenizer(
__A,padding="max_length",max_length=self.tokenizer.model_max_length,truncation=__A,return_tensors="pt",)
_lowerCamelCase : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCamelCase : Optional[int] = slerp(__A,__A,__A )
# duplicate text embeddings for each generation per prompt
_lowerCamelCase : Optional[int] = text_embeddings.repeat_interleave(__A,dim=0 )
# set timesteps
_lowerCamelCase : List[str] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCamelCase : Union[str, Any] = {}
if accepts_offset:
_lowerCamelCase : Dict = 1
self.scheduler.set_timesteps(__A,**__A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCamelCase , _lowerCamelCase : Any = self.get_timesteps(__A,__A,self.device )
_lowerCamelCase : Optional[int] = timesteps[:1].repeat(__A )
# Preprocess image
_lowerCamelCase : int = preprocess(__A,__A,__A )
_lowerCamelCase : Optional[int] = self.prepare_latents(
__A,__A,__A,text_embeddings.dtype,self.device,__A )
_lowerCamelCase : Tuple = preprocess(__A,__A,__A )
_lowerCamelCase : Optional[int] = self.prepare_latents(
__A,__A,__A,text_embeddings.dtype,self.device,__A )
_lowerCamelCase : Tuple = slerp(__A,__A,__A )
if clip_guidance_scale > 0:
_lowerCamelCase : Dict = self.get_clip_image_embeddings(__A,__A )
_lowerCamelCase : Dict = self.get_clip_image_embeddings(__A,__A )
_lowerCamelCase : Any = slerp(
__A,__A,__A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCamelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCamelCase : Union[str, Any] = content_text_input.input_ids.shape[-1]
_lowerCamelCase : Tuple = self.tokenizer([""],padding="max_length",max_length=__A,return_tensors="pt" )
_lowerCamelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCamelCase : str = uncond_embeddings.repeat_interleave(__A,dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCamelCase : List[str] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCamelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCamelCase : List[Any] = torch.randn(__A,generator=__A,device="cpu",dtype=__A ).to(
self.device )
else:
_lowerCamelCase : Any = torch.randn(__A,generator=__A,device=self.device,dtype=__A )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
_lowerCamelCase : str = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCamelCase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCamelCase : str = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCamelCase : Any = {}
if accepts_eta:
_lowerCamelCase : List[str] = eta
# check if the scheduler accepts generator
_lowerCamelCase : str = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCamelCase : str = generator
with self.progress_bar(total=__A ):
for i, t in enumerate(__A ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Union[str, Any] = self.scheduler.scale_model_input(__A,__A )
# predict the noise residual
_lowerCamelCase : Optional[Any] = self.unet(__A,__A,encoder_hidden_states=__A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : Any = noise_pred.chunk(2 )
_lowerCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCamelCase : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCamelCase , _lowerCamelCase : Dict = self.cond_fn(
__A,__A,__A,__A,__A,__A,__A,)
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Any = self.scheduler.step(__A,__A,__A,**__A ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCamelCase : List[Any] = 1 / 0.18215 * latents
_lowerCamelCase : Dict = self.vae.decode(__A ).sample
_lowerCamelCase : Optional[Any] = (image / 2 + 0.5).clamp(0,1 )
_lowerCamelCase : List[str] = image.cpu().permute(0,2,3,1 ).numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = self.numpy_to_pil(__A )
if not return_dict:
return (image, None)
        return StableDiffusionPipelineOutput(images=__A,nsfw_content_detected=__A )
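A hedged wiring sketch for a pipeline with this constructor. The checkpoint ids and the custom_pipeline name are assumptions modeled on the diffusers community pipelines; the CoCa components are left out, which the constructor above allows as long as captions are then passed explicitly.

import torch
from diffusers import DiffusionPipeline
from transformers import CLIPFeatureExtractor, CLIPModel

# CLIP components used for guidance and for the embedding slerp (ids assumed).
clip_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"
feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_id)
clip_model = CLIPModel.from_pretrained(clip_id, torch_dtype=torch.float16)

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_images_mixing_stable_diffusion",  # assumed name
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

# content/style inputs are PIL images or tensors; prompts are required here
# because no CoCa captioner was wired in:
# out = pipe(content_image, style_image,
#            content_prompt="a photo of a castle",
#            style_prompt="an oil painting",
#            num_inference_steps=50, clip_guidance_scale=100).images[0]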
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__A )
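A standalone forward-pass sketch mirroring what the slow test above exercises (it downloads real weights, so it only runs online):

from transformers import FlaxRobertaModel, RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
model = FlaxRobertaModel.from_pretrained("roberta-base")

inputs = tokenizer("Flax keeps parameters as pytrees.", return_tensors="np")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)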
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
UpperCAmelCase_ : Dict = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : int = list(s_dict.keys() )
for key in keys:
_lowerCamelCase : List[Any] = r".*/layers_(\d+)"
_lowerCamelCase : int = key
if re.match(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = re.sub(r"layers_(\d+)" , r"block/\1/layer" , _lowerCAmelCase )
_lowerCamelCase : str = r"(encoder|decoder)\/"
if re.match(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Optional[int] = re.match(_lowerCAmelCase , _lowerCAmelCase ).groups()
if groups[0] == "encoder":
_lowerCamelCase : Tuple = re.sub(r"/mlp/" , r"/1/mlp/" , _lowerCAmelCase )
_lowerCamelCase : Any = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , _lowerCAmelCase )
elif groups[0] == "decoder":
_lowerCamelCase : int = re.sub(r"/mlp/" , r"/2/mlp/" , _lowerCAmelCase )
_lowerCamelCase : int = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , _lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
_lowerCamelCase : Optional[int] = new_key.replace(_lowerCAmelCase , _lowerCAmelCase )
print(F'{key} -> {new_key}' )
_lowerCamelCase : Any = s_dict.pop(_lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_lowerCamelCase : Dict = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
_lowerCamelCase : Dict = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
_lowerCamelCase : List[str] = s_dict[key].shape[0]
_lowerCamelCase : Optional[Any] = s_dict[key]
for idx in range(_lowerCAmelCase ):
_lowerCamelCase : int = expert_weihts[idx]
            print(F'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
s_dict.pop(_lowerCAmelCase )
return s_dict
UpperCAmelCase_ : Any = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
import regex as re
with open(_lowerCAmelCase , "r" ) as f:
_lowerCamelCase : int = f.read()
_lowerCamelCase : Any = re.findall(r"(.*) = ([0-9.]*)" , _lowerCAmelCase )
_lowerCamelCase : str = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
_lowerCamelCase : List[Any] = float(_lowerCAmelCase ) if "." in value else int(_lowerCAmelCase )
_lowerCamelCase : List[str] = re.findall(r"(.*activations) = \(\'(.*)\',\)" , _lowerCAmelCase )[0]
_lowerCamelCase : Union[str, Any] = str(activation[1] )
_lowerCamelCase : Any = num_experts
_lowerCamelCase : str = SwitchTransformersConfig(**_lowerCAmelCase )
return config
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str=None , _lowerCAmelCase : List[Any]="./" , _lowerCAmelCase : Tuple=8 ):
"""simple docstring"""
print(F'Loading flax weights from : {flax_checkpoint_path}' )
_lowerCamelCase : str = checkpoints.load_tax_checkpoint(_lowerCAmelCase )
if gin_file is not None:
_lowerCamelCase : List[Any] = convert_gin_to_config(_lowerCAmelCase , _lowerCAmelCase )
else:
_lowerCamelCase : Dict = SwitchTransformersConfig.from_pretrained(_lowerCAmelCase )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration(_lowerCAmelCase )
_lowerCamelCase : List[Any] = flax_params["target"]
_lowerCamelCase : Tuple = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[Any] = rename_keys(_lowerCAmelCase )
_lowerCamelCase : int = unflatten_dict(_lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_lowerCAmelCase , _lowerCAmelCase )
print(F'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
UpperCAmelCase_ : int = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
    )
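An example invocation of the converter above; the script filename and all paths are placeholders.

# python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
#     --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --gin_file /path/to/operative_config.gin \
#     --pytorch_dump_folder_path ./switch-base-8 \
#     --num_experts 8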
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
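A minimal round-trip sketch, assuming the class above matches transformers' BlenderbotSmallTokenizerFast and the 90M checkpoint referenced in the maps above:

from transformers import BlenderbotSmallTokenizerFast

tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
ids = tokenizer("sam i am")["input_ids"]
print(ids)
print(tokenizer.decode(ids))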
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : Optional[Any],*__A : str,**__A : Union[str, Any] ):
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead.",__A,)
        super().__init__(*__A,**__A )
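The migration the warning asks for is a drop-in rename; a sketch (checkpoint id assumed):

from transformers import DeformableDetrImageProcessor

# Same preprocessing API as the deprecated feature extractor.
processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")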
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 1 |
'''simple docstring'''
from typing import Any
class UpperCAmelCase__ :
def __init__( self : Any,__A : Any ):
_lowerCamelCase : Optional[Any] = data
_lowerCamelCase : List[str] = None
def __repr__( self : int ):
return f'Node({self.data})'
class UpperCAmelCase__ :
def __init__( self : Union[str, Any] ):
_lowerCamelCase : str = None
def __iter__( self : Dict ):
_lowerCamelCase : int = self.head
while node:
yield node.data
_lowerCamelCase : List[str] = node.next
def __len__( self : Union[str, Any] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(__A ) for item in self] )
def __getitem__( self : List[str],__A : int ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Optional[Any],__A : int,__A : Any ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
_lowerCamelCase : List[Any] = self.head
for _ in range(__A ):
_lowerCamelCase : str = current.next
_lowerCamelCase : Optional[int] = data
def lowerCamelCase_ ( self : Any,__A : Any ):
self.insert_nth(len(self ),__A )
def lowerCamelCase_ ( self : str,__A : Any ):
self.insert_nth(0,__A )
def lowerCamelCase_ ( self : Optional[Any],__A : int,__A : Any ):
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
_lowerCamelCase : List[Any] = Node(__A )
if self.head is None:
_lowerCamelCase : Union[str, Any] = new_node
elif index == 0:
_lowerCamelCase : List[str] = self.head # link new_node to head
_lowerCamelCase : str = new_node
else:
_lowerCamelCase : List[str] = self.head
for _ in range(index - 1 ):
_lowerCamelCase : Optional[int] = temp.next
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : List[Any] = new_node
def lowerCamelCase_ ( self : str ): # print every node data
print(self )
def lowerCamelCase_ ( self : Optional[int] ):
return self.delete_nth(0 )
def lowerCamelCase_ ( self : Union[str, Any] ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def lowerCamelCase_ ( self : List[str],__A : int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
_lowerCamelCase : int = self.head # default first node
if index == 0:
_lowerCamelCase : int = self.head.next
else:
_lowerCamelCase : List[Any] = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : Optional[int] = temp.next
_lowerCamelCase : Any = temp.next.next
return delete_node.data
def lowerCamelCase_ ( self : List[str] ):
return self.head is None
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Dict = None
_lowerCamelCase : List[Any] = self.head
while current:
# Store the current node's next node.
_lowerCamelCase : Tuple = current.next
# Make the current node's next point backwards
_lowerCamelCase : Dict = prev
# Make the previous node be the current node
_lowerCamelCase : Dict = current
# Make the current node the next node (to progress iteration)
_lowerCamelCase : Optional[int] = next_node
# Return prev in order to put the head at the end
_lowerCamelCase : List[Any] = prev
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = LinkedList()
assert linked_list.is_empty() is True
assert str(_lowerCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(_lowerCAmelCase ) == i
linked_list.insert_nth(_lowerCAmelCase , i + 1 )
assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(_lowerCAmelCase ) == 9
assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_lowerCamelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(_lowerCAmelCase ) == "->".join(str(_lowerCAmelCase ) for i in range(-8 , 1 ) )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : int = [
-9,
100,
Node(77345112 ),
"dlrow olleH",
7,
5555,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
_lowerCamelCase : Tuple = LinkedList()
for i in test_input:
linked_list.insert_tail(_lowerCAmelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_lowerCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
_lowerCamelCase : Dict = linked_list.delete_head()
assert result == -9
assert (
str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
_lowerCamelCase : int = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
_lowerCamelCase : Optional[Any] = linked_list.delete_nth(10 )
assert result is None
assert (
str(_lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(_lowerCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_lowerCAmelCase )
assert (
str(_lowerCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_lowerCAmelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def A_ ( ):
"""simple docstring"""
from doctest import testmod
testmod()
_lowerCamelCase : Dict = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(_lowerCAmelCase )
print("\nReading/changing Node data using indexing:" )
print(F'Element at Position 1: {linked_list[1]}' )
_lowerCamelCase : Union[str, Any] = input("Enter New Value: " ).strip()
print("New list:" )
print(_lowerCAmelCase )
print(F'length of linked_list is : {len(_lowerCAmelCase )}' )
if __name__ == "__main__":
    main()
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """simple docstring"""
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 1 |
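The heart of this conversion is the `.qkv_proj.` handling: one fused metaseq projection becomes three HF matrices. A self-contained sketch of that `torch.split` on a dummy tensor (sizes are illustrative, not taken from a real checkpoint):

import torch

hidden = 8
fused = torch.randn(3 * hidden, hidden)  # stand-in for a fused qkv_proj weight

depth = fused.shape[0]
assert depth % 3 == 0
a, b, c = torch.split(fused, depth // 3, dim=0)  # metaseq stores these in K, V, Q order

assert a.shape == b.shape == c.shape == (hidden, hidden)
assert torch.equal(torch.cat([a, b, c], dim=0), fused)  # the split is lossless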
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'
def quote_of_the_day():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def random_quotes():
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response) | 11 |
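The helpers return parsed JSON but do no error handling; a slightly hardened variant is sketched below (the timeout value and `raise_for_status` policy are additions, not part of the original script):

import requests

def random_quote_safe(timeout: float = 10.0):
    response = requests.get("https://zenquotes.io/api/random", timeout=timeout)
    response.raise_for_status()  # turn HTTP errors into exceptions instead of bad JSON
    return response.json()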
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
    """simple docstring"""
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        name = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        name = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        name = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        name = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        name = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        name = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        name = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out" , "text_projection.3" )
    return name
def convert_state_dict( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    """simple docstring"""
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"] , images=image , padding=True , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
    else:
        raise ValueError(F'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("Successfully saved processor and model to" , pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(model_name , organization="nielsr" )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 1 |
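Both the `qkv` and `in_proj` branches above slice a fused attention projection into contiguous thirds: `val[:dim]`, `val[dim : dim * 2]`, `val[-dim:]`. A quick sketch verifying that the three slices tile the fused weight and bias (dummy sizes, not real checkpoint shapes):

import torch

dim = 4
val = torch.randn(3 * dim, dim)  # stand-in for a fused in_proj/qkv weight

q_w, k_w, v_w = val[:dim, :], val[dim : dim * 2, :], val[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), val)

bias = torch.randn(3 * dim)  # fused bias follows the same layout
q_b, k_b, v_b = bias[:dim], bias[dim : dim * 2], bias[-dim:]
assert torch.equal(torch.cat([q_b, k_b, v_b]), bias)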
'''simple docstring'''
def bead_sort( sequence : list ):
    """simple docstring"""
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9] | 11 |
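Because each transfer of `rod_upper - rod_lower` between adjacent rods is effectively a swap, this bead sort behaves like repeated bubble passes and sorts ascending in quadratic time. A randomized property check against `sorted` (assumes the repaired `bead_sort` above):

import random

for _ in range(100):
    data = [random.randint(0, 50) for _ in range(random.randint(0, 20))]
    assert bead_sort(list(data)) == sorted(data)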
'''simple docstring'''
from __future__ import annotations
def comp_and_swap( array : list[int] , index_a : int , index_b : int , direction : int ):
    """simple docstring"""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge( array : list[int] , low : int , length : int , direction : int ):
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array : list[int] , low : int , length : int , direction : int ):
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 1 |
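Bitonic sort assumes the input length is a power of two, since every merge halves the range evenly; the interactive driver above does not enforce that. A hedged wrapper that pads with a large sentinel to the next power of two (the padding strategy is an addition, not part of the original algorithm):

def bitonic_sort_any_length(data: list[int]) -> list[int]:
    size = 1
    while size < len(data):
        size *= 2
    sentinel = max(data, default=0) + 1  # sorts after every real element
    padded = data + [sentinel] * (size - len(data))
    bitonic_sort(padded, 0, len(padded), 1)  # 1 = ascending
    return padded[: len(data)]

assert bitonic_sort_any_length([3, 1, 2]) == [1, 2, 3]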
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj." , ".q_proj." )
            k_name = key.replace(".qkv_proj." , ".k_proj." )
            v_name = key.replace(".qkv_proj." , ".v_proj." )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused QKV weight in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
return sd
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """simple docstring"""
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 |
'''simple docstring'''
import math
def check_partition_perfect( positive_integer : int ):
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion : float = 1 / 12345 ):
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate )
        integer += 1
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
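A short trace of the search (assumes the repaired functions above): for odd n the candidate k = (n**2 - 1) / 4 is an integer, and only some candidates are perfect:

for n in (3, 5, 7, 9):
    k = (n**2 - 1) // 4
    print(n, k, check_partition_perfect(k))
# expected: 3 2 True / 5 6 False / 7 12 True / 9 20 False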
'''simple docstring'''
def euclidean_distance_sqr( point1 , point2 ):
    """simple docstring"""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort( array , column=0 ):
    """simple docstring"""
    return sorted(array , key=lambda x : x[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float("inf" ) ):
    """simple docstring"""
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float("inf" ) ):
    """simple docstring"""
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ):
    """simple docstring"""
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ):
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points))) | 11 |
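A brute-force cross-check on the sample input (assumes the repaired functions above); both should report the same minimum distance:

def brute_force_closest(points):
    best = float("inf")
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            best = min(best, euclidean_distance_sqr(points[i], points[j]))
    return best ** 0.5

sample = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
assert abs(closest_pair_of_points(sample, len(sample)) - brute_force_closest(sample)) < 1e-9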
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",FutureWarning,)
super().__init__(args=__A,**__A ) | 11 | 1 |
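Since the class only emits a deprecation warning and delegates to `Trainer`, migration is a rename; a minimal sketch (model, args, and dataset construction elided):

from transformers import Trainer, TrainingArguments

def build_trainer(model, training_args: TrainingArguments, train_dataset):
    # Drop-in replacement: `Trainer` accepts the same keyword arguments
    # that `SageMakerTrainer` forwarded via `super().__init__`.
    return Trainer(model=model, args=training_args, train_dataset=train_dataset)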
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve( num : int ):
    """simple docstring"""
    if num <= 0:
        message = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(message )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip()))) | 11 |
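Example values for small bounds, usable as sanity checks for the repaired `prime_sieve` above:

assert prime_sieve(2) == [2]
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]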
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 1 |
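`build_inputs_with_special_tokens` above appends only `</s>`; no BOS/CLS prefix is added. A quick sketch of that behavior (requires downloading the tokenizer, so treat it as illustrative):

from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tok.build_inputs_with_special_tokens(tok.encode("hello", add_special_tokens=False))
assert ids[-1] == tok.eos_token_id  # a single </s> is appended, nothing is prefixed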
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ :
def __init__( self : str,__A : Tuple,__A : int=2,__A : List[str]=3,__A : Dict=4,__A : str=2,__A : Tuple=7,__A : List[str]=True,__A : int=True,__A : List[Any]=True,__A : Tuple=True,__A : Optional[Any]=9_9,__A : Optional[int]=3_6,__A : str=3,__A : Tuple=4,__A : Tuple=3_7,__A : Any="gelu",__A : Any=0.1,__A : Any=0.1,__A : Optional[int]=5_1_2,__A : Optional[int]=1_6,__A : int=2,__A : str=0.02,__A : Optional[int]=6,__A : Dict=6,__A : Optional[Any]=3,__A : Any=4,__A : int=None,__A : List[Any]=1_0_0_0,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : str = image_size
_lowerCamelCase : Dict = patch_size
_lowerCamelCase : Any = text_seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : Any = use_input_mask
_lowerCamelCase : Dict = use_token_type_ids
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : Optional[Any] = type_sequence_label_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = coordinate_size
_lowerCamelCase : int = shape_size
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : Any = num_choices
_lowerCamelCase : List[Any] = scope
_lowerCamelCase : List[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCamelCase : Dict = text_seq_length
_lowerCamelCase : int = (image_size // patch_size) ** 2 + 1
_lowerCamelCase : Optional[Any] = self.text_seq_length + self.image_seq_length
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4],self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCamelCase : Optional[Any] = bbox[i, j, 3]
_lowerCamelCase : Tuple = bbox[i, j, 1]
_lowerCamelCase : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCamelCase : Union[str, Any] = bbox[i, j, 2]
_lowerCamelCase : Optional[Any] = bbox[i, j, 0]
_lowerCamelCase : Optional[int] = t
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : str = ids_tensor([self.batch_size, self.text_seq_length],self.type_vocab_size )
_lowerCamelCase : int = None
_lowerCamelCase : Dict = None
if self.use_labels:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : int = ids_tensor([self.batch_size, self.text_seq_length],self.num_labels )
_lowerCamelCase : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,coordinate_size=self.coordinate_size,shape_size=self.shape_size,input_size=self.image_size,patch_size=self.patch_size,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase_ ( self : Dict,__A : int,__A : str,__A : str,__A : List[Any],__A : List[Any],__A : Any,__A : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Union[str, Any] = LayoutLMvaModel(config=__A )
model.to(__A )
model.eval()
# text + image
_lowerCamelCase : Tuple = model(__A,pixel_values=__A )
_lowerCamelCase : int = model(
__A,bbox=__A,pixel_values=__A,attention_mask=__A,token_type_ids=__A )
_lowerCamelCase : Optional[Any] = model(__A,bbox=__A,pixel_values=__A,token_type_ids=__A )
_lowerCamelCase : Any = model(__A,bbox=__A,pixel_values=__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCamelCase : str = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCamelCase : Tuple = model(pixel_values=__A )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Tuple,__A : Optional[int],__A : Any,__A : Optional[Any],__A : Dict,__A : List[Any],__A : str,__A : List[str],__A : Union[str, Any] ):
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Union[str, Any] = LayoutLMvaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[Any] = model(
__A,bbox=__A,pixel_values=__A,attention_mask=__A,token_type_ids=__A,labels=__A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int,__A : int,__A : Optional[Any],__A : Optional[Any],__A : Optional[int],__A : Dict,__A : str,__A : Optional[int],__A : Optional[Any] ):
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : str = LayoutLMvaForTokenClassification(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(
__A,bbox=__A,pixel_values=__A,attention_mask=__A,token_type_ids=__A,labels=__A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase_ ( self : str,__A : int,__A : Tuple,__A : Tuple,__A : Dict,__A : Optional[Any],__A : Optional[int],__A : Union[str, Any],__A : List[Any] ):
_lowerCamelCase : Optional[Any] = LayoutLMvaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(
__A,bbox=__A,pixel_values=__A,attention_mask=__A,token_type_ids=__A,start_positions=__A,end_positions=__A,)
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[int] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase_ ( self : Tuple,__A : List[str],__A : Tuple,__A : Optional[int],__A : Optional[Any],__A : Tuple ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = LayoutLMvaModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : str,__A : Dict=False ):
_lowerCamelCase : List[Any] = copy.deepcopy(__A )
if model_class in get_values(__A ):
_lowerCamelCase : Dict = {
k: v.unsqueeze(1 ).expand(-1,self.model_tester.num_choices,-1 ).contiguous()
if isinstance(__A,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
_lowerCamelCase : Optional[Any] = torch.ones(self.model_tester.batch_size,dtype=torch.long,device=__A )
elif model_class in get_values(__A ):
_lowerCamelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size,dtype=torch.long,device=__A )
_lowerCamelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size,dtype=torch.long,device=__A )
elif model_class in [
*get_values(__A ),
]:
_lowerCamelCase : Any = torch.zeros(
self.model_tester.batch_size,dtype=torch.long,device=__A )
elif model_class in [
*get_values(__A ),
]:
_lowerCamelCase : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length),dtype=torch.long,device=__A,)
return inputs_dict
def lowerCamelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = LayoutLMvaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=__A ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(__A )
_lowerCamelCase : Union[str, Any] = self.default_image_processor
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Optional[int] = image_processor(images=__A,return_tensors="pt" ).pixel_values.to(__A )
_lowerCamelCase : str = torch.tensor([[1, 2]] )
_lowerCamelCase : Union[str, Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCamelCase : Optional[int] = model(
input_ids=input_ids.to(__A ),bbox=bbox.to(__A ),pixel_values=pixel_values.to(__A ),)
# verify the logits
_lowerCamelCase : List[str] = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape,__A )
_lowerCamelCase : int = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3],__A,atol=1e-4 ) ) | 11 |
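The nested loops in `prepare_config_and_inputs` that legalize `bbox` (forcing x0 <= x1 and y0 <= y1) can be vectorized; a hedged equivalent using `torch.sort` on each coordinate pair:

import torch

def legalize_bbox(bbox: torch.Tensor) -> torch.Tensor:
    # bbox: (..., 4) laid out as (x0, y0, x1, y1); sort each coordinate pair
    xs, _ = torch.sort(bbox[..., 0::2], dim=-1)
    ys, _ = torch.sort(bbox[..., 1::2], dim=-1)
    out = torch.empty_like(bbox)
    out[..., 0::2] = xs
    out[..., 1::2] = ys
    return out

boxes = torch.tensor([[5, 9, 2, 3]])
assert legalize_bbox(boxes).tolist() == [[2, 3, 5, 9]]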
'''simple docstring'''
def equation( x : float ):
    """simple docstring"""
    return 10 - x * x
def bisection( a : float , b : float ):
    """simple docstring"""
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c = a
    while (b - a) >= 0.0_1:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
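Worked example: `equation(x) = 10 - x**2` has roots at plus and minus the square root of 10 (about 3.1623), so both calls above converge toward 3.16 within the 0.01 tolerance; a quick check (assumes the repaired functions above):

import math

assert abs(bisection(-2, 5) - math.sqrt(10)) < 0.02
assert abs(bisection(0, 6) - math.sqrt(10)) < 0.02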
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase__ ( A , A , A , unittest.TestCase ):
lowerCAmelCase_ = StableUnCLIPPipeline
lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : str ):
        embedder_hidden_size = 3_2
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
_lowerCamelCase : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=__A,projection_dim=__A,intermediate_size=3_7,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,) )
torch.manual_seed(0 )
_lowerCamelCase : int = PriorTransformer(
num_attention_heads=2,attention_head_dim=1_2,embedding_dim=__A,num_layers=1,)
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = DDPMScheduler(
variance_type="fixed_small_log",prediction_type="sample",num_train_timesteps=1_0_0_0,clip_sample=__A,clip_sample_range=5.0,beta_schedule="squaredcos_cap_v2",)
# regular denoising components
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = StableUnCLIPImageNormalizer(embedding_dim=__A )
_lowerCamelCase : str = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
_lowerCamelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
_lowerCamelCase : Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=__A,projection_dim=3_2,intermediate_size=3_7,layer_norm_eps=1e-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,) )
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = UNetaDConditionModel(
sample_size=3_2,in_channels=4,out_channels=4,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),block_out_channels=(3_2, 6_4),attention_head_dim=(2, 4),class_embed_type="projection",projection_class_embeddings_input_dim=embedder_projection_dim * 2,cross_attention_dim=__A,layers_per_block=1,upcast_attention=__A,use_linear_projection=__A,)
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = DDIMScheduler(
beta_schedule="scaled_linear",beta_start=0.00085,beta_end=0.012,prediction_type="v_prediction",set_alpha_to_one=__A,steps_offset=1,)
torch.manual_seed(0 )
_lowerCamelCase : Union[str, Any] = AutoencoderKL()
_lowerCamelCase : str = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def lowerCamelCase_ ( self : List[str],__A : Any,__A : List[str]=0 ):
if str(__A ).startswith("mps" ):
            generator = torch.manual_seed(__A )
else:
            generator = torch.Generator(device=__A ).manual_seed(__A )
_lowerCamelCase : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCamelCase_ ( self : Any ):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
def lowerCamelCase_ ( self : int ):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : str ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l",torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe("anime turle",generator=generator,output_type="np" )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image,expected_image )
def lowerCamelCase_ ( self : Optional[int] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l",torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle",prior_num_inference_steps=2,num_inference_steps=2,output_type="np",)
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9 | 11 |
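The memory-saving pattern used in both integration tests generalizes to ordinary inference; a hedged usage sketch (needs a CUDA device and a weights download):

import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.enable_attention_slicing()       # lower peak attention memory
pipe.enable_sequential_cpu_offload()  # stream weights to the GPU module by module
image = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np").images[0]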
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'\s+')
def get_hash( example ):
    """simple docstring"""
    return {"hash": hashlib.md5(re.sub(PATTERN , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats( example ):
    """simple docstring"""
    line_lengths = [len(line ) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats( example ):
    """simple docstring"""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]] )
    return {"alpha_frac": alpha_frac}
def check_uniques( example , uniques ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def is_autogenerated( example , scan_width=5 ):
    """simple docstring"""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test( example , scan_width=5 , coeff=0.0_5 ):
    """simple docstring"""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
    nlines = example["content"].count("\n" )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords( example ):
    """simple docstring"""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments( example , minimum=4 ):
    """simple docstring"""
    lines = example["content"].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio( example ):
    """simple docstring"""
    input_ids = tokenizer(example["content"] , truncation=False )["input_ids"]
    ratio = len(example["content"] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess( example ):
    """simple docstring"""
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter( example , uniques , args ):
    """simple docstring"""
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file( file_path ):
    """simple docstring"""
    with open(file_path , "rb" ) as f_in:
        with gzip.open(str(file_path ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
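The exact-deduplication step rests on two pieces above: `get_hash` (MD5 of whitespace-stripped content) and `check_uniques` (each hash may pass the filter once). A self-contained sketch of that pattern on toy data, with no `datasets` dependency:

import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(text: str) -> str:
    return hashlib.md5(re.sub(PATTERN, "", text).encode("utf-8")).hexdigest()

docs = ["a = 1", "a=1", "b = 2"]              # first two normalize identically
uniques = {content_hash(d) for d in docs}     # stands in for set(ds.unique("hash"))

kept = []
for d in docs:
    h = content_hash(d)
    if h in uniques:                          # first occurrence wins
        uniques.remove(h)
        kept.append(d)
assert kept == ["a = 1", "b = 2"]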
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'rwkv'
lowerCAmelCase_ = {'max_position_embeddings': 'context_length'}
def __init__( self : Optional[Any],__A : List[Any]=5_0_2_7_7,__A : Any=1_0_2_4,__A : Optional[int]=4_0_9_6,__A : int=3_2,__A : Optional[int]=None,__A : Optional[int]=None,__A : Tuple=1e-5,__A : Union[str, Any]=0,__A : Tuple=0,__A : Union[str, Any]=6,__A : int=False,__A : str=True,**__A : str,):
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[Any] = context_length
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size
_lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size
_lowerCamelCase : int = layer_norm_epsilon
_lowerCamelCase : int = rescale_every
_lowerCamelCase : str = use_cache
_lowerCamelCase : Dict = bos_token_id
_lowerCamelCase : int = eos_token_id
super().__init__(
tie_word_embeddings=__A,bos_token_id=__A,eos_token_id=__A,**__A ) | 11 |
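The two derived defaults above can be checked directly: `attention_hidden_size` falls back to `hidden_size` and `intermediate_size` to four times it. A small sketch (assumes a transformers version that ships RWKV):

from transformers import RwkvConfig

cfg = RwkvConfig(hidden_size=1024)
assert cfg.attention_hidden_size == 1024      # falls back to hidden_size
assert cfg.intermediate_size == 4 * 1024      # falls back to 4 * hidden_size
assert cfg.context_length == 1024             # aliased as max_position_embeddings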
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'The total number of n-best predictions to generate.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() # both branches of the former ternary named the same class, so the version_2 conditional was dead code
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
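# A minimal sketch of the cache-under-lock pattern used by the dataset above: the
# first process to acquire the lock builds and saves the features, later processes
# load them from disk. The cache path and payload below are illustrative assumptions.
import os
import torch
from filelock import FileLock

def load_or_build(cache_path, builder, overwrite=False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            return torch.load(cache_path)  # cache hit: reuse previously saved payload
        payload = builder()                # cache miss: build once, then persist
        torch.save(payload, cache_path)
        return payload

features = load_or_build("/tmp/cached_features.pt", lambda: {"features": [1, 2, 3]})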
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCAmelCase_ : int = logging.getLogger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : Dict,__A : Union[str, Any],__A : Optional[Any],__A : str,__A : Optional[Any]=None ):
super().__init__(
__A,question_encoder_tokenizer=__A,generator_tokenizer=__A,index=__A,init_retrieval=__A,)
_lowerCamelCase : Union[str, Any] = None
def lowerCamelCase_ ( self : Optional[int],__A : int ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_lowerCamelCase : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_lowerCamelCase : int = str(distributed_port + 1 )
_lowerCamelCase : Any = dist.new_group(ranks=__A,backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowerCamelCase_ ( self : int ):
return dist.get_rank(group=self.process_group ) == 0
def lowerCamelCase_ ( self : List[str],__A : int,__A : Dict,__A : Optional[Any]=torch.floataa ):
_lowerCamelCase : Dict = torch.empty(__A,dtype=__A )
dist.scatter(__A,src=0,scatter_list=__A,group=self.process_group )
return target_tensor
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_lowerCamelCase : Optional[int] = next((addr for addr in addrs if addr.startswith("e" )),__A )
return ifname
def lowerCamelCase_ ( self : Any,__A : np.ndarray,__A : int ):
# single GPU training
if not dist.is_initialized():
_lowerCamelCase , _lowerCamelCase : Dict = self._main_retrieve(__A,__A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__A )
# distributed training
_lowerCamelCase : Optional[Any] = dist.get_world_size(group=self.process_group )
# gather logic
_lowerCamelCase : Optional[int] = None
if self._is_main():
_lowerCamelCase : Any = [torch.empty(question_hidden_states.shape,dtype=torch.floataa ) for _ in range(__A )]
dist.gather(torch.tensor(__A ),dst=0,gather_list=__A,group=self.process_group )
# scatter logic
_lowerCamelCase : List[Any] = question_hidden_states.shape[0]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Dict = []
if self._is_main():
assert len(__A ) == world_size
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self._main_retrieve(torch.cat(__A ).numpy(),__A )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = torch.tensor(__A ), torch.tensor(__A )
_lowerCamelCase : Optional[int] = self._chunk_tensor(__A,__A )
_lowerCamelCase : Any = self._chunk_tensor(__A,__A )
_lowerCamelCase : Optional[int] = self._scattered(__A,[n_queries, n_docs],target_type=torch.intaa )
_lowerCamelCase : Optional[int] = self._scattered(__A,[n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__A ) | 11 |
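# The interface-name inference above can be exercised standalone. This sketch
# mirrors the "first interface whose name starts with 'e'" heuristic; it is only
# a heuristic and may print None on machines without such an interface.
import psutil

addrs = psutil.net_if_addrs()  # dict mapping interface name -> list of addresses
ifname = next((name for name in addrs if name.startswith("e")), None)
print(ifname)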
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# The mask token behaves like a normal word, i.e. it includes the space before it
# and is kept in the raw text, so there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |
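# A pure-Python illustration of the special-token layout built above:
# [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair, with
# token type ids 0 for the first segment and 1 for the second. The ids below are
# illustrative placeholders, not the real ALBERT vocabulary.
cls, sep = [2], [3]
seq_a, seq_b = [7, 8, 9], [4, 5]

pair_ids = cls + seq_a + sep + seq_b + sep
token_type_ids = len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1]
print(pair_ids, token_type_ids)  # [2, 7, 8, 9, 3, 4, 5, 3] [0, 0, 0, 0, 0, 1, 1, 1]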
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
UpperCAmelCase_ : Optional[Any] = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
UpperCAmelCase_ : Optional[Any] = {
'vinai/phobert-base': 256,
'vinai/phobert-large': 256,
}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = set()
_lowerCamelCase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCamelCase : Any = char
_lowerCamelCase : str = set(_lowerCAmelCase )
return pairs
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[str],__A : List[str],__A : Optional[Any],__A : Any="<s>",__A : Union[str, Any]="</s>",__A : Union[str, Any]="</s>",__A : Dict="<s>",__A : Tuple="<unk>",__A : Any="<pad>",__A : List[str]="<mask>",**__A : str,):
super().__init__(
bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,cls_token=__A,pad_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : str = vocab_file
_lowerCamelCase : str = merges_file
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Any = 0
_lowerCamelCase : Optional[int] = 1
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Optional[int] = 3
self.add_from_file(__A )
_lowerCamelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(__A,encoding="utf-8" ) as merges_handle:
_lowerCamelCase : Optional[int] = merges_handle.read().split("\n" )[:-1]
_lowerCamelCase : Dict = [tuple(merge.split()[:-1] ) for merge in merges]
_lowerCamelCase : Dict = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Optional[int] = {}
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : Dict = [self.cls_token_id]
_lowerCamelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self : Union[str, Any],__A : List[int],__A : Optional[List[int]] = None,__A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A,token_ids_a=__A,already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def lowerCamelCase_ ( self : int,__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self : int ):
return len(self.encoder )
def lowerCamelCase_ ( self : List[Any] ):
return dict(self.encoder,**self.added_tokens_encoder )
def lowerCamelCase_ ( self : Any,__A : Optional[Any] ):
if token in self.cache:
return self.cache[token]
_lowerCamelCase : Union[str, Any] = tuple(__A )
_lowerCamelCase : str = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
_lowerCamelCase : Tuple = get_pairs(__A )
if not pairs:
return token
while True:
_lowerCamelCase : int = min(__A,key=lambda __A : self.bpe_ranks.get(__A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCamelCase , _lowerCamelCase : Optional[Any] = bigram
_lowerCamelCase : List[Any] = []
_lowerCamelCase : List[str] = 0
while i < len(__A ):
try:
_lowerCamelCase : List[Any] = word.index(__A,__A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCamelCase : List[Any] = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCamelCase : Any = tuple(__A )
_lowerCamelCase : Any = new_word
if len(__A ) == 1:
break
else:
_lowerCamelCase : Dict = get_pairs(__A )
_lowerCamelCase : Tuple = "@@ ".join(__A )
_lowerCamelCase : Tuple = word[:-4]
_lowerCamelCase : Optional[int] = word
return word
def lowerCamelCase_ ( self : str,__A : Optional[Any] ):
_lowerCamelCase : int = []
_lowerCamelCase : Any = re.findall(r"\S+\n?",__A )
for token in words:
split_tokens.extend(list(self.bpe(__A ).split(" " ) ) )
return split_tokens
def lowerCamelCase_ ( self : str,__A : Union[str, Any] ):
return self.encoder.get(__A,self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self : List[str],__A : Optional[Any] ):
return self.decoder.get(__A,self.unk_token )
def lowerCamelCase_ ( self : Optional[int],__A : List[Any] ):
_lowerCamelCase : Dict = " ".join(__A ).replace("@@ ","" ).strip()
return out_string
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[str] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
if os.path.abspath(self.merges_file ) != os.path.abspath(__A ):
copyfile(self.merges_file,__A )
return out_vocab_file, out_merge_file
def lowerCamelCase_ ( self : Union[str, Any],__A : Optional[Any] ):
if isinstance(__A,__A ):
try:
with open(__A,"r",encoding="utf-8" ) as fd:
self.add_from_file(__A )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
_lowerCamelCase : Optional[int] = f.readlines()
for lineTmp in lines:
_lowerCamelCase : List[str] = lineTmp.strip()
_lowerCamelCase : int = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
_lowerCamelCase : Tuple = line[:idx]
_lowerCamelCase : Tuple = len(self.encoder ) | 11 |
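# A compact sketch of the BPE merge loop implemented above: repeatedly fuse the
# adjacent pair with the lowest merge rank until no ranked pair remains. The toy
# merge table is an illustrative assumption, not PhoBERT's real bpe.codes.
bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}

def bpe(word):
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        ranked = [p for p in pairs if p in bpe_ranks]
        if not ranked:
            break  # no mergeable pair left
        first, second = min(ranked, key=bpe_ranks.get)
        i, merged = 0, []
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)  # fuse the best-ranked pair
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

print(bpe("low"))  # ['low']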
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 1 |
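# The preprocessing above does three things: strip non-printing control
# characters, collapse exotic whitespace code points to a plain space, and apply
# NFC normalization. A standalone sketch, with the character sets abbreviated to
# an illustrative subset:
import re
import unicodedata

non_printing = re.compile(f'[{"".join(map(chr, list(range(0, 9)) + list(range(11, 32))))}]')
whitespaces = {"\u00a0", "\u2009", "\u202f"}  # illustrative subset of unicode spaces

def preprocess(text):
    text = non_printing.sub("", text)
    text = "".join(" " if ch in whitespaces else ch for ch in text)
    return unicodedata.normalize("NFC", text)

print(preprocess("hello\u00a0world"))  # 'hello world'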
'''simple docstring'''
from math import loga
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif not isinstance(_lowerCAmelCase , int ):
raise TypeError("Input value must be a 'int' type" )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
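# The log2 trick above isolates the lowest set bit with a & -a; an equivalent
# integer-only formulation uses bit_length and avoids floating point entirely:
def lowest_set_bit_index(a: int) -> int:
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if a == 0 else (a & -a).bit_length() - 1

assert lowest_set_bit_index(12) == 2  # 0b1100 -> lowest set bit at index 2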
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 1 |
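# The merge above materializes both lists and re-sorts the concatenation; for
# inputs that are already sorted ascending, heapq.merge is a lazy alternative
# that produces the same ordering:
from heapq import merge

odd = sorted((3, 9, -11, 0, 7, 5, 1, -1))
even = sorted((4, 6, 2, 0, 8, 10, 3, -2))
print(list(merge(odd, even)))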
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'linear'
lowerCAmelCase_ = 'cosine'
lowerCAmelCase_ = 'cosine_with_restarts'
lowerCAmelCase_ = 'polynomial'
lowerCAmelCase_ = 'constant'
lowerCAmelCase_ = 'constant_with_warmup'
lowerCAmelCase_ = 'piecewise_constant'
def A_ ( _lowerCAmelCase : Optimizer , _lowerCAmelCase : int = -1 ):
"""simple docstring"""
return LambdaLR(_lowerCAmelCase , lambda _lowerCAmelCase : 1 , last_epoch=_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optimizer , _lowerCAmelCase : int , _lowerCAmelCase : int = -1 ):
"""simple docstring"""
def lr_lambda(_lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCAmelCase ) / float(max(1.0 , _lowerCAmelCase ) )
return 1.0
return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , last_epoch=_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optimizer , _lowerCAmelCase : str , _lowerCAmelCase : int = -1 ):
"""simple docstring"""
_lowerCamelCase : List[str] = {}
_lowerCamelCase : str = step_rules.split("," )
for rule_str in rule_list[:-1]:
_lowerCamelCase , _lowerCamelCase : Any = rule_str.split(":" )
_lowerCamelCase : List[Any] = int(_lowerCAmelCase )
_lowerCamelCase : int = float(_lowerCAmelCase )
_lowerCamelCase : Any = value
_lowerCamelCase : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
def rule_func(_lowerCAmelCase : int ) -> float:
_lowerCamelCase : Tuple = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
_lowerCamelCase : Union[str, Any] = create_rules_function(_lowerCAmelCase , _lowerCAmelCase )
return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , last_epoch=_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=-1 ):
"""simple docstring"""
def lr_lambda(_lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCAmelCase ) / float(max(1 , _lowerCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optimizer , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : float = 0.5 , _lowerCAmelCase : int = -1 ):
"""simple docstring"""
def lr_lambda(_lowerCAmelCase : Union[str, Any] ):
if current_step < num_warmup_steps:
return float(_lowerCAmelCase ) / float(max(1 , _lowerCAmelCase ) )
_lowerCamelCase : Optional[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCAmelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optimizer , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = -1 ):
"""simple docstring"""
def lr_lambda(_lowerCAmelCase : Optional[Any] ):
if current_step < num_warmup_steps:
return float(_lowerCAmelCase ) / float(max(1 , _lowerCAmelCase ) )
_lowerCamelCase : List[Any] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any]=1E-7 , _lowerCAmelCase : Tuple=1.0 , _lowerCAmelCase : int=-1 ):
"""simple docstring"""
_lowerCamelCase : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
def lr_lambda(_lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCAmelCase ) / float(max(1 , _lowerCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
_lowerCamelCase : Any = lr_init - lr_end
_lowerCamelCase : List[Any] = num_training_steps - num_warmup_steps
_lowerCamelCase : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
_lowerCamelCase : Dict = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ : Optional[int] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def A_ ( _lowerCAmelCase : Union[str, SchedulerType] , _lowerCAmelCase : Optimizer , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : int = 1 , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : int = -1 , ):
"""simple docstring"""
_lowerCamelCase : Dict = SchedulerType(_lowerCAmelCase )
_lowerCamelCase : List[str] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCAmelCase , last_epoch=_lowerCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCAmelCase , step_rules=_lowerCAmelCase , last_epoch=_lowerCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCAmelCase , num_warmup_steps=_lowerCAmelCase , last_epoch=_lowerCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCAmelCase , num_warmup_steps=_lowerCAmelCase , num_training_steps=_lowerCAmelCase , num_cycles=_lowerCAmelCase , last_epoch=_lowerCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCAmelCase , num_warmup_steps=_lowerCAmelCase , num_training_steps=_lowerCAmelCase , power=_lowerCAmelCase , last_epoch=_lowerCAmelCase , )
return schedule_func(
_lowerCAmelCase , num_warmup_steps=_lowerCAmelCase , num_training_steps=_lowerCAmelCase , last_epoch=_lowerCAmelCase ) | 11 |
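# A hedged usage sketch of the linear warmup-then-decay lambda defined above,
# driven through torch's LambdaLR with a throwaway parameter and optimizer:
import torch
from torch.optim.lr_scheduler import LambdaLR

params = [torch.nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(params, lr=0.1)
num_warmup_steps, num_training_steps = 4, 10

def lr_lambda(current_step):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))  # linear warmup
    return max(0.0, float(num_training_steps - current_step)
               / float(max(1, num_training_steps - num_warmup_steps)))  # linear decay

scheduler = LambdaLR(optimizer, lr_lambda)
for _ in range(num_training_steps):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # decayed to [0.0] after the final step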
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 1 |
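# The _LazyModule above defers the heavy submodule import until attribute access.
# The same idea can be sketched with a module-level __getattr__ (PEP 562) if this
# code were placed in a package's __init__.py; the structure dict is illustrative.
import importlib

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

def __getattr__(name):
    for submodule, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __name__)  # import on demand
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")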
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCAmelCase_ : Optional[Any] = logging.getLogger(__name__)
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ):
"""simple docstring"""
if os.path.exists(_lowerCAmelCase ):
if os.path.exists(os.path.join(_lowerCAmelCase , "config.json" ) ) and os.path.isfile(
os.path.join(_lowerCAmelCase , "config.json" ) ):
os.remove(os.path.join(_lowerCAmelCase , "config.json" ) )
if os.path.exists(os.path.join(_lowerCAmelCase , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(_lowerCAmelCase , "pytorch_model.bin" ) ):
os.remove(os.path.join(_lowerCAmelCase , "pytorch_model.bin" ) )
else:
os.makedirs(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int]=False ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = 2
if unlogit:
_lowerCamelCase : List[Any] = torch.pow(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : int = p * torch.log(_lowerCAmelCase )
_lowerCamelCase : List[Any] = 0
return -plogp.sum(dim=-1 )
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
logger.info("lv, h >\t" + "\t".join(F'{x + 1}' for x in range(len(_lowerCAmelCase ) ) ) )
for row in range(len(_lowerCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'layer {row + 1}:\t' + "\t".join(F'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(F'layer {row + 1}:\t' + "\t".join(F'{x:d}' for x in tensor[row].cpu().data ) )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Any=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : int=None , _lowerCAmelCase : str=False ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_lowerCamelCase : Tuple = torch.zeros(_lowerCAmelCase , _lowerCAmelCase ).to(args.device )
_lowerCamelCase : Dict = torch.zeros(_lowerCAmelCase , _lowerCAmelCase ).to(args.device )
if head_mask is None:
_lowerCamelCase : Any = torch.ones(_lowerCAmelCase , _lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_lowerCAmelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_lowerCamelCase : Any = None
_lowerCamelCase : Union[str, Any] = 0.0
_lowerCamelCase : str = 0.0
for step, inputs in enumerate(tqdm(_lowerCAmelCase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
_lowerCamelCase : Union[str, Any] = tuple(t.to(args.device ) for t in inputs )
((_lowerCamelCase) , ) : Union[str, Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase , head_mask=_lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = entropy(attn.detach() , _lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_lowerCamelCase : Any = 2
_lowerCamelCase : int = torch.pow(torch.pow(_lowerCAmelCase , _lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_lowerCamelCase : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(_lowerCAmelCase )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(_lowerCAmelCase )
logger.info("Head ranked by importance scores" )
_lowerCamelCase : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_lowerCamelCase : int = torch.arange(
head_importance.numel() , device=args.device )
_lowerCamelCase : Optional[int] = head_ranks.view_as(_lowerCAmelCase )
print_ad_tensor(_lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = compute_heads_importance(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase )
_lowerCamelCase : List[str] = 1 / loss # instead of a downstream score, use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , _lowerCAmelCase , original_score * args.masking_threshold )
_lowerCamelCase : Tuple = torch.ones_like(_lowerCAmelCase )
_lowerCamelCase : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_lowerCamelCase : Tuple = original_score
while current_score >= original_score * args.masking_threshold:
_lowerCamelCase : int = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_lowerCamelCase : Union[str, Any] = float("Inf" )
_lowerCamelCase : List[str] = head_importance.view(-1 ).sort()[1]
if len(_lowerCAmelCase ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
_lowerCamelCase : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
_lowerCamelCase : Tuple = new_head_mask.view(-1 )
_lowerCamelCase : Any = 0.0
_lowerCamelCase : Tuple = new_head_mask.view_as(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(_lowerCAmelCase )
# Compute metric and head importance again
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = compute_heads_importance(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , head_mask=_lowerCAmelCase )
_lowerCamelCase : Any = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , _lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(_lowerCAmelCase )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = datetime.now()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = compute_heads_importance(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , compute_importance=_lowerCAmelCase , head_mask=_lowerCAmelCase )
_lowerCamelCase : Any = 1 / loss
_lowerCamelCase : str = datetime.now() - before_time
_lowerCamelCase : Optional[Any] = sum(p.numel() for p in model.parameters() )
_lowerCamelCase : Union[str, Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = [
v,
]
assert sum(len(_lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_lowerCAmelCase )
_lowerCamelCase : Dict = sum(p.numel() for p in model.parameters() )
_lowerCamelCase : Tuple = datetime.now()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = compute_heads_importance(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , compute_importance=_lowerCAmelCase , head_mask=_lowerCAmelCase , actually_pruned=_lowerCAmelCase , )
_lowerCamelCase : Optional[Any] = 1 / loss
_lowerCamelCase : List[str] = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , _lowerCAmelCase , _lowerCAmelCase , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , _lowerCAmelCase , _lowerCAmelCase )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(_lowerCAmelCase , args.output_dir )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=_lowerCAmelCase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=_lowerCAmelCase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=_lowerCAmelCase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=_lowerCAmelCase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=_lowerCAmelCase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=_lowerCAmelCase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=_lowerCAmelCase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=_lowerCAmelCase , help="Batch size." )
parser.add_argument("--seed" , type=_lowerCAmelCase , default=42 )
parser.add_argument("--local_rank" , type=_lowerCAmelCase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=_lowerCAmelCase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_lowerCAmelCase , default="" , help="Can be used for distant debugging." )
_lowerCamelCase : Any = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_lowerCamelCase : Dict = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
_lowerCamelCase : Tuple = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_lowerCamelCase : str = torch.device("cuda" , args.local_rank )
_lowerCamelCase : int = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_lowerCamelCase : List[str] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_lowerCamelCase : List[str] = nn.parallel.DistributedDataParallel(
_lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowerCAmelCase )
elif args.n_gpu > 1:
_lowerCamelCase : str = nn.DataParallel(_lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_lowerCAmelCase )
torch.save(_lowerCAmelCase , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , _lowerCAmelCase )
# Prepare dataset
_lowerCamelCase : List[str] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_lowerCamelCase : str = (torch.from_numpy(_lowerCAmelCase ),)
_lowerCamelCase : Union[str, Any] = TensorDataset(*_lowerCAmelCase )
_lowerCamelCase : Any = RandomSampler(_lowerCAmelCase )
_lowerCamelCase : int = DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_lowerCamelCase : List[Any] = mask_heads(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
prune_heads(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main() | 11 |
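# The per-head entropy used above is -sum(p * log p) over each attention
# distribution. A standalone check on random softmax weights; the clamp is an
# alternative to the script's zeroing of p*log(p) at p == 0:
import torch

def entropy(p, eps=1e-12):
    plogp = p * torch.log(p.clamp_min(eps))  # avoid log(0)
    return -plogp.sum(dim=-1)

attn = torch.softmax(torch.randn(2, 4, 5, 5), dim=-1)  # batch, heads, query, key
print(entropy(attn).shape)  # torch.Size([2, 4, 5])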
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 1 |
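# The tolerance check above boils down to an allclose comparison with an absolute
# tolerance; a standalone illustration with made-up numbers:
import numpy as np

expected = np.array([[-0.6513, 1.5035, -0.2766]])
observed = expected + 5e-5  # perturbation well within atol=1e-4
print(np.allclose(observed, expected, atol=1e-4))  # True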
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
            _lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=True )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple , flax_tensor ):
    """simple docstring"""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
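# Hedged illustration of the renaming above (the key tuple is made up): a 2-D Flax
# kernel stored under ("encoder", "layer_0", "dense", "kernel") with shape
# (in_features, out_features) becomes ("encoder", "layer_0", "dense", "weight")
# transposed to (out_features, in_features), the layout torch.nn.Linear expects;
# 3-D expert kernels are permuted with (0, 2, 1) so each expert's matrix is transposed.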
def get_key_and_tensorstore_dict(layer , checkpoint_info , switch_checkpoint_path ):
    """simple docstring"""
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block , save_path ):
    """simple docstring"""
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/" , "." )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def shard_on_the_fly(switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name : str = WEIGHTS_NAME ):
    """simple docstring"""
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info , sep="/" )
        all_layers = {}
        for layer in checkpoint_info.keys():
            curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
                layer , checkpoint_info , switch_checkpoint_path )
            if curr_real_layer_name in all_layers:
                all_layers[curr_real_layer_name][split_layer[-1]] = content
            else:
                all_layers[curr_real_layer_name] = {split_layer[-1]: content}
        for key in all_layers.keys():
            # open tensorstore file
            raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
            raw_weights = torch.tensor(raw_weights )
            weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
            # use the renaming pattern from the small conversion scripts
            key , raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ) , raw_weights )
            key = "/".join(key )
            # If this weight is going to tip up over the maximal size, we split.
            if current_block_size + weight_size > max_shard_size:
                save_path = os.path.join(
                    dump_path , weights_name.replace(".bin" , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
                rename_and_save_block(current_block , save_path )
                sharded_state_dicts.append(current_block.keys() )
                del current_block
                current_block = {}
                current_block_size = 0
            current_block[key] = raw_weights.to(getattr(torch , dtype ) )
            current_block_size += weight_size
            total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            ".bin" , f'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
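# Hedged sketch of the index written above, assuming two shards were produced
# (key names are illustrative only):
# {
#     "metadata": {"total_size": 123456},
#     "weight_map": {"shared.weight": "pytorch_model-00001-of-00002.bin", ...}
# }
# The file itself lands under WEIGHTS_INDEX_NAME inside the dump folder.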
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 1 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase__ :
def __init__( self : Any,__A : int=None,__A : str=None ):
# Input as list
_lowerCamelCase : List[Any] = list(poly_a or [0] )[:]
_lowerCamelCase : Tuple = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
_lowerCamelCase : str = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
_lowerCamelCase : Any = len(self.polyB )
# Add 0 to make lengths equal a power of 2
        _lowerCamelCase : Optional[int] = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
_lowerCamelCase : int = complex(mpmath.root(x=1,n=self.c_max_length,k=1 ) )
# The product
_lowerCamelCase : Any = self.__multiply()
def lowerCamelCase_ ( self : List[str],__A : List[Any] ):
_lowerCamelCase : int = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
# Corner case
if len(__A ) <= 1:
return dft[0]
#
_lowerCamelCase : Optional[int] = self.c_max_length // 2
while next_ncol > 0:
_lowerCamelCase : Union[str, Any] = [[] for i in range(__A )]
_lowerCamelCase : List[str] = self.root**next_ncol
# First half of next step
_lowerCamelCase : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__A ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
_lowerCamelCase : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__A ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
_lowerCamelCase : Dict = new_dft
_lowerCamelCase : Dict = next_ncol // 2
return dft[0]
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Tuple = self.__dft("A" )
_lowerCamelCase : str = self.__dft("B" )
_lowerCamelCase : Dict = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
_lowerCamelCase : Union[str, Any] = 2
while next_ncol <= self.c_max_length:
_lowerCamelCase : Dict = [[] for i in range(__A )]
_lowerCamelCase : Optional[Any] = self.root ** (next_ncol // 2)
_lowerCamelCase : Optional[Any] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
_lowerCamelCase : List[Any] = new_inverse_c
next_ncol *= 2
# Unpack
_lowerCamelCase : Any = [round(x[0].real,8 ) + round(x[0].imag,8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Any ):
_lowerCamelCase : Dict = "A = " + " + ".join(
f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) )
_lowerCamelCase : Any = "B = " + " + ".join(
f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) )
_lowerCamelCase : Optional[Any] = "A*B = " + " + ".join(
f'{coef}*x^{i}' for coef, i in enumerate(self.product ) )
return f'{a}\n{b}\n{c}'
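# Hedged usage sketch (assumes the class is constructed with two coefficient lists):
# multiplying A(x) = 1 + 2x + 3x^2 by B(x) = 3 + 4x should give
# 3 + 10x + 17x^2 + 12x^3, i.e. product coefficients [3, 10, 17, 12].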
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''simple docstring'''
from math import sqrt
def solution(limit : int = 1000000 ):
    """simple docstring"""
    num_cuboids : int = 0
    max_cuboid_size : int = 0
    sum_shortest_sides : int
    while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
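# Worked example (hedged): for an a x b x c cuboid with a <= b <= c, the shortest
# surface route has length sqrt((a + b)**2 + c**2); the classic 6x5x3 case gives
# sqrt((5 + 3)**2 + 6**2) = sqrt(100) = 10, an integer, so that cuboid is counted.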
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : Union[str, Any],__A : Optional[Any]=1_3,__A : Optional[Any]=3_2,__A : Union[str, Any]=3,__A : List[Any]=4,__A : List[str]=[1_0, 2_0, 3_0, 4_0],__A : Tuple=[2, 2, 3, 2],__A : int=True,__A : List[str]=True,__A : Dict=3_7,__A : Optional[int]="gelu",__A : Dict=1_0,__A : Optional[Any]=0.02,__A : Any=["stage2", "stage3", "stage4"],__A : List[Any]=3,__A : Any=None,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Dict = image_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Tuple = num_stages
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : Dict = depths
_lowerCamelCase : Dict = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : List[Any] = out_features
_lowerCamelCase : int = num_labels
_lowerCamelCase : Union[str, Any] = scope
_lowerCamelCase : str = num_stages
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Dict ):
return ConvNextConfig(
num_channels=self.num_channels,num_stages=self.num_stages,hidden_sizes=self.hidden_sizes,depths=self.depths,is_training=self.is_training,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,out_features=self.out_features,)
def lowerCamelCase_ ( self : List[str] ):
return UperNetConfig(
backbone_config=self.get_backbone_config(),hidden_size=5_1_2,pool_scales=[1, 2, 3, 6],use_auxiliary_head=__A,auxiliary_loss_weight=0.4,auxiliary_in_channels=4_0,auxiliary_channels=2_5_6,auxiliary_num_convs=1,auxiliary_concat_input=__A,loss_ignore_index=2_5_5,num_labels=self.num_labels,)
def lowerCamelCase_ ( self : Optional[int],__A : List[Any],__A : str,__A : List[Any] ):
_lowerCamelCase : Union[str, Any] = UperNetForSemanticSegmentation(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCAmelCase_ = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = UperNetModelTester(self )
_lowerCamelCase : str = ConfigTester(self,config_class=__A,has_text_modality=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : str ):
return
def lowerCamelCase_ ( self : int ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__A )
_lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : str = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
def lowerCamelCase_ ( self : int ):
def check_hidden_states_output(__A : int,__A : List[str],__A : Optional[int] ):
_lowerCamelCase : Any = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : Any = self.model_tester.num_stages
self.assertEqual(len(__A ),expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],)
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = True
check_hidden_states_output(__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[str] = True
check_hidden_states_output(__A,__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[int] = _config_zero_init(__A )
_lowerCamelCase : int = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(config=__A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@unittest.skip(reason="UperNet does not have tied weights" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@slow
def lowerCamelCase_ ( self : Any ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int = UperNetForSemanticSegmentation.from_pretrained(__A )
self.assertIsNotNone(__A )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
_lowerCamelCase : Any = Image.open(_lowerCAmelCase ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
_lowerCamelCase : List[Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(__A )
_lowerCamelCase : Optional[int] = prepare_img()
_lowerCamelCase : Dict = processor(images=__A,return_tensors="pt" ).to(__A )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__A )
_lowerCamelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : Optional[Any] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3],__A,atol=1e-4 ) )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
_lowerCamelCase : List[Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(__A )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Optional[Any] = processor(images=__A,return_tensors="pt" ).to(__A )
with torch.no_grad():
_lowerCamelCase : Any = model(**__A )
_lowerCamelCase : str = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3],__A,atol=1e-4 ) ) | 11 |
'''simple docstring'''
def A_ ( num : int ):
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary : list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = tempfile.mkdtemp()
_lowerCamelCase : Optional[Any] = BlipImageProcessor()
_lowerCamelCase : Union[str, Any] = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
_lowerCamelCase : List[str] = BlipaProcessor(__A,__A )
processor.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Union[str, Any],**__A : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__A ).tokenizer
def lowerCamelCase_ ( self : Tuple,**__A : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__A ).image_processor
def lowerCamelCase_ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = [np.random.randint(2_5_5,size=(3, 3_0, 4_0_0),dtype=np.uinta )]
_lowerCamelCase : Optional[Any] = [Image.fromarray(np.moveaxis(__A,0,-1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer(),image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Dict = self.get_tokenizer(bos_token="(BOS)",eos_token="(EOS)" )
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=__A,padding_value=1.0 )
_lowerCamelCase : Union[str, Any] = BlipaProcessor.from_pretrained(
self.tmpdirname,bos_token="(BOS)",eos_token="(EOS)",do_normalize=__A,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(),tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer,__A )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : List[Any] = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCamelCase : Tuple = image_processor(__A,return_tensors="np" )
_lowerCamelCase : int = processor(images=__A,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1e-2 )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : Tuple = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : Dict = "lower newer"
_lowerCamelCase : Optional[Any] = processor(text=__A )
_lowerCamelCase : Any = tokenizer(__A,return_token_type_ids=__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key],encoded_processor[key] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = self.get_image_processor()
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : int = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : int = self.prepare_image_inputs()
_lowerCamelCase : Any = processor(text=__A,images=__A )
self.assertListEqual(list(inputs.keys() ),["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : str = self.get_image_processor()
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Tuple = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Tuple = processor.batch_decode(__A )
_lowerCamelCase : Any = tokenizer.batch_decode(__A )
self.assertListEqual(__A,__A )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : str = BlipaProcessor(tokenizer=__A,image_processor=__A )
_lowerCamelCase : List[Any] = "lower newer"
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : List[Any] = processor(text=__A,images=__A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ),["pixel_values", "input_ids", "attention_mask"] ) | 11 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 1 |
'''simple docstring'''
def equation(x : float ):
"""simple docstring"""
return 10 - x * x
def bisection(a : float , b : float ):
    """simple docstring"""
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong space!" )
    c : float = a
    while (b - a) >= 0.0_1:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
return c
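# Hedged sanity check: equation(x) = 10 - x*x has roots at +/- sqrt(10) ~ 3.1623,
# so both calls below should converge to roughly 3.16 (within the 0.01 tolerance).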
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data : list ):
    """simple docstring"""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
return data
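# Hedged note: the shuffle above swaps elements in place, so the argument list is
# mutated and the returned value is the same object that was passed in.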
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
UpperCAmelCase_ : Tuple = ['gpt2']
UpperCAmelCase_ : Optional[Any] = 'gpt2'
if is_tf_available():
class UpperCAmelCase__ ( tf.Module ):
def __init__( self : Any,__A : Dict ):
super().__init__()
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : str = AutoConfig.from_pretrained(__A )
_lowerCamelCase : Optional[Any] = TFGPTaLMHeadModel.from_config(__A )
@tf.function(input_signature=(tf.TensorSpec((None,),tf.string,name="text" ),) )
def lowerCamelCase_ ( self : List[Any],__A : Tuple ):
_lowerCamelCase : int = self.tokenizer(__A )
_lowerCamelCase : str = tokenized["input_ids"].to_tensor()
_lowerCamelCase : Tuple = tf.cast(input_ids_dense > 0,tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_lowerCamelCase : List[Any] = self.model(input_ids=__A,attention_mask=__A )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
super().setUp()
_lowerCamelCase : Any = [GPTaTokenizer.from_pretrained(__A ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_lowerCamelCase : int = [TFGPTaTokenizer.from_pretrained(__A ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_lowerCamelCase : str = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
_lowerCamelCase : int = list(zip(self.test_sentences,self.test_sentences[::-1] ) )
def lowerCamelCase_ ( self : Optional[int] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_lowerCamelCase : str = tokenizer([test_inputs],return_tensors="tf" )
_lowerCamelCase : int = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_lowerCamelCase : str = python_outputs[key].numpy()
_lowerCamelCase : List[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__A,tf.intaa ) == tf_outputs_values ) )
@slow
def lowerCamelCase_ ( self : int ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Optional[Any] = tf.function(__A )
for test_inputs in self.test_sentences:
_lowerCamelCase : List[Any] = tf.constant(__A )
_lowerCamelCase : str = compiled_tokenizer(__A )
_lowerCamelCase : List[Any] = tf_tokenizer(__A )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : int = ModelToSave(tokenizer=__A )
_lowerCamelCase : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCamelCase : Union[str, Any] = model.serving(__A ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCamelCase : Optional[int] = Path(__A ) / "saved.model"
tf.saved_model.save(__A,__A,signatures={"serving_default": model.serving} )
_lowerCamelCase : Tuple = tf.saved_model.load(__A )
_lowerCamelCase : Optional[Any] = loaded_model.signatures["serving_default"](__A )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCamelCase_ ( self : Any ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCamelCase : str = tf_tokenizer(__A ) # Build model with some sample inputs
_lowerCamelCase : List[Any] = tf_tokenizer.get_config()
_lowerCamelCase : Union[str, Any] = TFGPTaTokenizer.from_config(__A )
_lowerCamelCase : int = model_from_config(__A )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_lowerCamelCase : Any = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
_lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCamelCase : Optional[Any] = tf_tokenizer(__A,max_length=__A )
_lowerCamelCase : Optional[Any] = out["input_ids"].numpy().shape[1]
assert out_length == max_length | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
            _lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=True )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : Any = UNetaDModel(
block_out_channels=(3_2, 6_4),layers_per_block=2,sample_size=3_2,in_channels=3,out_channels=3,down_block_types=("DownBlock2D", "AttnDownBlock2D"),up_block_types=("AttnUpBlock2D", "UpBlock2D"),)
return model
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = self.dummy_uncond_unet
_lowerCamelCase : List[str] = KarrasVeScheduler()
_lowerCamelCase : Union[str, Any] = KarrasVePipeline(unet=__A,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : str = torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = pipe(num_inference_steps=2,generator=__A,output_type="numpy" ).images
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : str = pipe(num_inference_steps=2,generator=__A,output_type="numpy",return_dict=__A )[0]
_lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : int = "google/ncsnpp-celebahq-256"
_lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = KarrasVeScheduler()
_lowerCamelCase : Dict = KarrasVePipeline(unet=__A,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCamelCase : Any = pipe(num_inference_steps=2_0,generator=__A,output_type="numpy" ).images
_lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_lowerCamelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 11 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
    def lowerCamelCase_ ( self,token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def lowerCamelCase_ ( self,token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'wav2vec2'
    def __init__( self,vocab_size=3_2,hidden_size=7_6_8,num_hidden_layers=1_2,num_attention_heads=1_2,intermediate_size=3_0_7_2,hidden_act="gelu",hidden_dropout=0.1,activation_dropout=0.1,attention_dropout=0.1,feat_proj_dropout=0.0,feat_quantizer_dropout=0.0,final_dropout=0.1,layerdrop=0.1,initializer_range=0.02,layer_norm_eps=1e-5,feat_extract_norm="group",feat_extract_activation="gelu",conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2),conv_stride=(5, 2, 2, 2, 2, 2, 2),conv_kernel=(1_0, 3, 3, 3, 3, 2, 2),conv_bias=False,num_conv_pos_embeddings=1_2_8,num_conv_pos_embedding_groups=1_6,do_stable_layer_norm=False,apply_spec_augment=True,mask_time_prob=0.05,mask_time_length=1_0,mask_time_min_masks=2,mask_feature_prob=0.0,mask_feature_length=1_0,mask_feature_min_masks=0,num_codevectors_per_group=3_2_0,num_codevector_groups=2,contrastive_logits_temperature=0.1,num_negatives=1_0_0,codevector_dim=2_5_6,proj_codevector_dim=2_5_6,diversity_loss_weight=0.1,ctc_loss_reduction="sum",ctc_zero_infinity=False,use_weighted_layer_sum=False,classifier_proj_size=2_5_6,tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0),tdnn_kernel=(5, 3, 3, 1, 1),tdnn_dilation=(1, 2, 3, 1, 1),xvector_output_dim=5_1_2,pad_token_id=0,bos_token_id=1,eos_token_id=2,add_adapter=False,adapter_kernel_size=3,adapter_stride=2,num_adapter_layers=3,output_hidden_size=None,adapter_attn_dim=None,**kwargs,):
        super().__init__(**kwargs,pad_token_id=pad_token_id,bos_token_id=bos_token_id,eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul,self.conv_stride,1 )
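# Quick sanity check of the property above: with the default conv strides
# (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples raw audio by
# 5 * 2**6 = 320, i.e. one output frame per 320 input samples (20 ms at 16 kHz).
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320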
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
    def __init__( self,parameters : Iterable[torch.nn.Parameter],decay : float = 0.9999,min_decay : float = 0.0,update_after_step : int = 0,use_ema_warmup : bool = False,inv_gamma : Union[float, int] = 1.0,power : Union[float, int] = 2 / 3,model_cls : Optional[Any] = None,model_config : Dict[str, Any] = None,**kwargs,):
        if isinstance(parameters,torch.nn.Module ):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",deprecation_message,standard_warn=False,)
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value",None ) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value","1.0.0",deprecation_message,standard_warn=False )
            decay = kwargs["max_value"]
        if kwargs.get("min_value",None ) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value","1.0.0",deprecation_message,standard_warn=False )
            min_decay = kwargs["min_value"]
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device",None ) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device","1.0.0",deprecation_message,standard_warn=False )
            self.to(device=kwargs["device"] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
    def from_pretrained( cls,path,model_cls ):
        _, ema_kwargs = model_cls.load_config(path,return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters(),model_cls=model_cls,model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model
    def save_pretrained( self,path ):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop("shadow_params",None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )
    def get_decay( self,optimization_step : int ):
        step = max(0,optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (1_0 + step)
        cur_decay_value = min(cur_decay_value,self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value,self.min_decay )
        return cur_decay_value
@torch.no_grad()
    def step( self,parameters : Iterable[torch.nn.Parameter] ):
        if isinstance(parameters,torch.nn.Module ):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",deprecation_message,standard_warn=False,)
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params,parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param,modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to( self,parameters : Iterable[torch.nn.Parameter] ):
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params,parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to( self,device=None,dtype=None ):
        self.shadow_params = [
            p.to(device=device,dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict( self ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store( self,parameters : Iterable[torch.nn.Parameter] ):
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore( self,parameters : Iterable[torch.nn.Parameter] ):
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
        for c_param, param in zip(self.temp_stored_params,parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict( self,state_dict : dict ):
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get("decay",self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1" )
        self.min_decay = state_dict.get("min_decay",self.min_decay )
        if not isinstance(self.min_decay,float ):
            raise ValueError("Invalid min_decay" )
        self.optimization_step = state_dict.get("optimization_step",self.optimization_step )
        if not isinstance(self.optimization_step,int ):
            raise ValueError("Invalid optimization_step" )
        self.update_after_step = state_dict.get("update_after_step",self.update_after_step )
        if not isinstance(self.update_after_step,int ):
            raise ValueError("Invalid update_after_step" )
        self.use_ema_warmup = state_dict.get("use_ema_warmup",self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup,bool ):
            raise ValueError("Invalid use_ema_warmup" )
        self.inv_gamma = state_dict.get("inv_gamma",self.inv_gamma )
        if not isinstance(self.inv_gamma,(float, int) ):
            raise ValueError("Invalid inv_gamma" )
        self.power = state_dict.get("power",self.power )
        if not isinstance(self.power,(float, int) ):
            raise ValueError("Invalid power" )
        shadow_params = state_dict.get("shadow_params",None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params,list ):
                raise ValueError("shadow_params must be a list" )
            if not all(isinstance(p,torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("shadow_params must all be Tensors" )
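# A minimal usage sketch for the EMA helper above; the toy model, step count
# and decay value are illustrative only.
if __name__ == "__main__":
    net = torch.nn.Linear(4, 4)
    ema = UpperCAmelCase__(net.parameters(), decay=0.999)
    for _ in range(10):
        # an optimizer update of `net` would happen here in a real training loop
        ema.step(net.parameters())
    ema.copy_to(net.parameters())  # swap the averaged weights into the model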
'''simple docstring'''
def solution( n : int = 600851475143 ):
    """simple docstring"""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 |
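    # Illustrative check: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
    assert solution(13195) == 29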
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def load_checkpoint( checkpoint_path ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj." , ".q_proj." )
            k_name = key.replace(".qkv_proj." , ".k_proj." )
            v_name = key.replace(".qkv_proj." , ".v_proj." )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """simple docstring"""
    sd = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(sd )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 1 |
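# Example invocation (the script name and paths are placeholders):
#   python convert_opt_checkpoint.py --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path ./opt-converted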
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1
def casimir_force( force : float , area : float , distance : float ):
    """simple docstring"""
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
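    # Worked example (inputs are illustrative): two 1 cm^2 plates held
    # 1 micrometre apart attract with roughly 1.3e-7 N.
    print(casimir_force(force=0, area=1e-4, distance=1e-6))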
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ):
    """simple docstring"""
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        name = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        name = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        name = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        name = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        name = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        name = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        name = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out" , "text_projection.3" )
    return name
def convert_state_dict( orig_state_dict , config ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'] = val[:dim]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    """simple docstring"""
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"] , images=image , padding=True , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
    else:
        raise ValueError(F'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("Successfully saved processor and model to" , pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(model_name , organization="nielsr" )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
    default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 1 |
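# Example invocation (the script name and checkpoint path are placeholders):
#   python convert_groupvit_checkpoint.py --checkpoint_path /path/to/groupvit.pth \
#       --model_name groupvit-gcc-yfcc --pytorch_dump_folder_path ./groupvit-converted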
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig( PretrainedConfig ):
    model_type = 'owlvit_text_model'
    def __init__( self,vocab_size=4_9_4_0_8,hidden_size=5_1_2,intermediate_size=2_0_4_8,num_hidden_layers=1_2,num_attention_heads=8,max_position_embeddings=1_6,hidden_act="quick_gelu",layer_norm_eps=1e-5,attention_dropout=0.0,initializer_range=0.02,initializer_factor=1.0,pad_token_id=0,bos_token_id=4_9_4_0_6,eos_token_id=4_9_4_0_7,**kwargs,):
        super().__init__(pad_token_id=pad_token_id,bos_token_id=bos_token_id,eos_token_id=eos_token_id,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained( cls,pretrained_model_name_or_path : Union[str, os.PathLike],**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path,**kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls,"model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict,**kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    model_type = 'owlvit_vision_model'
    def __init__( self,hidden_size=7_6_8,intermediate_size=3_0_7_2,num_hidden_layers=1_2,num_attention_heads=1_2,num_channels=3,image_size=7_6_8,patch_size=3_2,hidden_act="quick_gelu",layer_norm_eps=1e-5,attention_dropout=0.0,initializer_range=0.02,initializer_factor=1.0,**kwargs,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
@classmethod
    def from_pretrained( cls,pretrained_model_name_or_path : Union[str, os.PathLike],**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path,**kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls,"model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict,**kwargs )
class OwlViTConfig( PretrainedConfig ):
    model_type = 'owlvit'
    is_composition = True
    def __init__( self,text_config=None,vision_config=None,projection_dim=5_1_2,logit_scale_init_value=2.6592,return_dict=True,**kwargs,):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
@classmethod
    def from_pretrained( cls,pretrained_model_name_or_path : Union[str, os.PathLike],**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path,**kwargs )
        if "model_type" in config_dict and hasattr(cls,"model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict,**kwargs )
@classmethod
    def from_text_vision_configs( cls,text_config : Dict,vision_config : Dict,**kwargs ):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict,**kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig( OnnxConfig ):
@property
    def inputs( self ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
    def outputs( self ):
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
    def atol_for_validation( self ):
        return 1e-4
def lowerCamelCase_ ( self : List[str],__A : "ProcessorMixin",__A : int = -1,__A : int = -1,__A : Optional["TensorType"] = None,):
_lowerCamelCase : int = super().generate_dummy_inputs(
processor.tokenizer,batch_size=__A,seq_length=__A,framework=__A )
_lowerCamelCase : List[Any] = super().generate_dummy_inputs(
processor.image_processor,batch_size=__A,framework=__A )
return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset( self ):
        return 1_4
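# A small composition sketch using the configs above; the overridden values are
# illustrative, everything else falls back to the defaults shown in __init__.
if __name__ == "__main__":
    text_cfg = OwlViTTextConfig(vocab_size=4_9_4_0_8)
    vision_cfg = OwlViTVisionConfig(patch_size=3_2)
    full_cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
    print(full_cfg.projection_dim)  # 512 by default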
'''simple docstring'''
from __future__ import annotations
def comp_and_swap( array : list[int] , index_a : int , index_b : int , direction : int ):
    """simple docstring"""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge( array : list[int] , low : int , length : int , direction : int ):
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array : list[int] , low : int , length : int , direction : int ):
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 1 |
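    # Self-check (bitonic sort assumes the element count is a power of two);
    # the sample list is illustrative only.
    _demo = [12, 42, -21, 1]
    bitonic_sort(_demo, 0, len(_demo), 1)
    assert _demo == [-21, 1, 12, 42]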
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type , generator_name_or_path : str , question_encoder_name_or_path : str , dest_dir : Path , config_name_or_path : str = None , generator_tokenizer_name_or_path : str = None , question_encoder_tokenizer_name_or_path : str = None , ):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
) | 11 |
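# Example invocation (the script name and model identifiers are placeholders):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated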
'''simple docstring'''
import math
def check_partition_perfect( positive_integer : int ):
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion : float = 1 / 12345 ):
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(integer )
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
'''simple docstring'''
import math
def prime_sieve( n : int ):
    """simple docstring"""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( limit : int = 999966663333 ):
    """simple docstring"""
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution()) | 11 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( Trainer ):
    def __init__( self,args=None,**kwargs ):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",FutureWarning,)
        super().__init__(args=args,**kwargs )
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset( datasets.BeamBasedBuilder ):
    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ),supervised_keys=None,)
    def _split_generators( self,dl_manager,pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN,gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _build_pcollection( self,pipeline,examples ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset( datasets.BeamBasedBuilder ):
    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ),supervised_keys=None,)
    def _split_generators( self,dl_manager,pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def _build_pcollection( self,pipeline,examples ):
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
    """simple docstring"""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples():
    """simple docstring"""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class UpperCAmelCase__ ( TestCase ):
    @require_beam
    def test_download_and_prepare( self ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir,beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir,builder.name,"default","0.0.0",f'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(builder.info.features,datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows,expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples,expected_num_examples )
            self.assertDictEqual(dset["train"][0],get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1],get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir,builder.name,"default","0.0.0","dataset_info.json" ) ) )
            del dset
    @require_beam
    def test_download_and_prepare_sharded( self ):
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir,beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet,num_shards=2 )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            tmp_cache_dir,builder.name,"default","0.0.0",f'{builder.name}-train-00000-of-00002.arrow' ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            tmp_cache_dir,builder.name,"default","0.0.0",f'{builder.name}-train-00001-of-00002.arrow' ) ) )
                self.assertDictEqual(builder.info.features,datasets.Features({"content": datasets.Value("string" )} ) )
                dset = builder.as_dataset()
                self.assertEqual(dset["train"].num_rows,expected_num_examples )
                self.assertEqual(dset["train"].info.splits["train"].num_examples,expected_num_examples )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset["train"]["content"] ),sorted(["foo", "bar", "foobar"] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(tmp_cache_dir,builder.name,"default","0.0.0","dataset_info.json" ) ) )
                del dset
    @require_beam
    def test_no_beam_options( self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions,builder.download_and_prepare )
    @require_beam
    def test_nested_features( self ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir,beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir,builder.name,"default","0.0.0",f'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(
                builder.info.features,datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows,expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples,expected_num_examples )
            self.assertDictEqual(dset["train"][0],get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1],get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir,builder.name,"default","0.0.0","dataset_info.json" ) ) )
            del dset
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self,vocab_file=None,merges_file=None,tokenizer_file=None,errors="replace",bos_token="<s>",eos_token="</s>",sep_token="</s>",cls_token="<s>",unk_token="<unk>",pad_token="<pad>",mask_token="<mask>",add_prefix_space=False,trim_offsets=True,**kwargs,):
        super().__init__(
            vocab_file,merges_file,tokenizer_file=tokenizer_file,errors=errors,bos_token=bos_token,eos_token=eos_token,sep_token=sep_token,cls_token=cls_token,unk_token=unk_token,pad_token=pad_token,mask_token=mask_token,add_prefix_space=add_prefix_space,trim_offsets=trim_offsets,**kwargs,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space",add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers,pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer,tokenizer_component,None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space",add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets",trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors,state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer,tokenizer_component,new_value )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self,value ):
        value = AddedToken(value,lstrip=True,rstrip=False ) if isinstance(value,str ) else value
        self._mask_token = value
    def _batch_encode_plus( self,*args,**kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words",False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args,**kwargs )
    def _encode_plus( self,*args,**kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words",False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args,**kwargs )
    def save_vocabulary( self,save_directory : str,filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory,name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self,token_ids_a : List[int],token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def build_inputs_with_special_tokens( self,token_ids_a : List[int],token_ids_b : Optional[List[int]] = None ):
        return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 1 |
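# A minimal usage sketch (the checkpoint id comes from the vocab map above;
# left commented out because it downloads tokenizer files):
#   tok = UpperCAmelCase__.from_pretrained("facebook/blenderbot-3B")
#   print(tok(" Hello world")["input_ids"])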
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'cls_token': '<s>'}
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab,range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file,"w",encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname,**kwargs )
    def get_rust_tokenizer( self,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname,**kwargs )
    def get_input_output_texts( self,tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file,self.merges_file,**self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ),input_bpe_tokens )
    def roberta_dict_integration_testing( self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!",add_special_tokens=True ),[0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418",add_special_tokens=True ),[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2],)
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base" )
        text = tokenizer.encode("sequence builders",add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build",add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders",add_special_tokens=True,add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders","multi-sequence build",add_special_tokens=True,add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text,text_a )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding( self ):
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence,add_special_tokens=False,add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char,space_encoding )
        encoded = tokenizer.encode(sequence,add_special_tokens=False,add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char,space_encoding )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        encoded = tokenizer.encode(sequence,add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char,space_encoding )
        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask,lstrip=True,rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )
        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"
        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char,space_encoding )
        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char,space_encoding )
    def test_pretokenized_inputs( self ):
        pass
def lowerCamelCase_ ( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : int = self.tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : int = "A, <mask> AllenNLP sentence."
_lowerCamelCase : Tuple = tokenizer_r.encode_plus(__A,add_special_tokens=__A,return_token_type_ids=__A )
_lowerCamelCase : str = tokenizer_p.encode_plus(__A,add_special_tokens=__A,return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ),sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ),sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ),)
_lowerCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_lowerCamelCase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"],[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"],[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def lowerCamelCase_ ( self : List[Any] ):
for trim_offsets, add_prefix_space in itertools.product([True, False],repeat=2 ):
_lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowerCamelCase : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"],__A )
self.assertEqual(post_processor_state["add_prefix_space"],__A )
self.assertEqual(post_processor_state["trim_offsets"],__A )
def lowerCamelCase_ ( self : List[Any] ):
# Test that the offsets are correctly adapted to the arguments `add_prefix_space`
# and `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : List[Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCamelCase : List[Any] = f'{text_of_1_token} {text_of_1_token}'
_lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : Optional[Any] = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(len(__A ) + 1, len(__A ) + 1 + len(__A )),)
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : int = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(len(__A ) + 1, len(__A ) + 1 + len(__A )),)
_lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : str = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(len(__A ), len(__A ) + 1 + len(__A )),)
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : List[str] = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(len(__A ), len(__A ) + 1 + len(__A )),)
_lowerCamelCase : Optional[Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : Optional[Any] = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )),)
_lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : str = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(1 + len(__A ), 1 + len(__A ) + 1 + len(__A )),)
_lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : Optional[Any] = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(1 + len(__A ), 1 + len(__A ) + 1 + len(__A )),) | 11 |
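# A minimal sketch of what `trim_offsets` controls in the offset-mapping tests
# above (illustrative values matching the assertions there, not part of the
# original test file): with trim_offsets=True the leading space is excluded
# from a token's character span; with trim_offsets=False it is included.
text = "hello hello"
offsets_trimmed = [(0, 5), (6, 11)]  # each span covers exactly "hello"
offsets_untrimmed = [(0, 5), (5, 11)]  # second span starts at the space
for start, end in offsets_trimmed:
    assert text[start:end] == "hello"
assert text[5:11] == " hello"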
'''simple docstring'''
def equation(x: float) -> float:
    """Evaluate f(x) = 10 - x * x, the function whose root we search for."""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """Approximate a root of ``equation`` in [a, b] by repeated halving."""
    # Bisection requires a sign change between the two endpoints.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Function has the same sign at both interval endpoints!")
    c = a
    while (b - a) >= 0.0_1:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
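# A quick numeric check of the example above (illustrative, not part of the
# original file): the positive root of 10 - x * x is sqrt(10) ~ 3.1623, and
# both calls bracket it, so each returned value lies within the final
# interval width (0.01) of sqrt(10).
import math
for lo, hi in [(-2, 5), (0, 6)]:
    root = bisection(lo, hi)  # assumes the `bisection` defined above
    assert abs(root - math.sqrt(10)) < 0.0_1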
'''simple docstring'''
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main() -> None:
    """Read a message, key and mode from stdin and run the cipher."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(F'\n{mode.title()}ed message:')
    print(translated)
def encrypt_message(key: str, message: str) -> str:
    """Encrypt ``message`` with the Vigenère cipher under ``key``."""
    return translate_message(key, message, "encrypt")
def decrypt_message(key: str, message: str) -> str:
    """Decrypt ``message`` with the Vigenère cipher under ``key``."""
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of ``message`` by the matching ``key`` letter."""
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main() | 11 |
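# A worked example of the cipher above (illustrative, assumes the functions
# defined in this file): with key "KEY" the shifts cycle through K(10), E(4),
# Y(24), so "HELLO" maps H->R, E->I, L->J, L->V, O->S.
assert encrypt_message("KEY", "HELLO") == "RIJVS"
assert decrypt_message("KEY", "RIJVS") == "HELLO"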
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(line ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 1 |
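# A minimal sketch of the exact-deduplication step used above (an assumed
# simplification, not part of the original script): strip all whitespace,
# hash the remaining content, and keep only the first occurrence of each
# hash.
import hashlib
import re
pattern = re.compile(r"\s+")
def content_hash(text):
    return hashlib.md5(pattern.sub("", text).encode("utf-8")).hexdigest()
samples = ["def f():\n    return 1", "def f():\n\treturn   1", "print('hi')"]
seen = set()
unique_samples = []
for sample in samples:
    digest = content_hash(sample)
    if digest not in seen:  # first occurrence wins
        seen.add(digest)
        unique_samples.append(sample)
assert len(unique_samples) == 2  # the two `def f` variants collapse into one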
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
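# A minimal sketch of the lock-guarded caching pattern implemented above
# (hypothetical helper name, not the real API): the first process to acquire
# the lock builds and serializes the features; every other process finds the
# cache file already present and loads it instead of rebuilding.
import os
import torch
from filelock import FileLock
def load_or_build(cache_path, build_fn):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        data = build_fn()
        torch.save(data, cache_path)
        return data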
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
UpperCAmelCase_ : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase_ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(default=A , metadata={'help': 'The input training data file (a text file).'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowerCamelCase_ ( self : List[Any] ):
if self.train_file is not None:
_lowerCamelCase : Optional[int] = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_lowerCamelCase : str = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = True
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def __call__( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : List[Any] = "label" if "label" in features[0].keys() else "labels"
_lowerCamelCase : Optional[int] = [feature.pop(__A ) for feature in features]
_lowerCamelCase : Any = len(__A )
_lowerCamelCase : List[str] = len(features[0]["input_ids"] )
_lowerCamelCase : str = [
[{k: v[i] for k, v in feature.items()} for i in range(__A )] for feature in features
]
_lowerCamelCase : Union[str, Any] = list(chain(*__A ) )
_lowerCamelCase : List[Any] = self.tokenizer.pad(
__A,padding=self.padding,max_length=self.max_length,pad_to_multiple_of=self.pad_to_multiple_of,return_tensors="pt",)
# Un-flatten
_lowerCamelCase : Optional[Any] = {k: v.view(__A,__A,-1 ) for k, v in batch.items()}
# Add back labels
_lowerCamelCase : List[str] = torch.tensor(__A,dtype=torch.int64 )
return batch
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , _lowerCAmelCase , _lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCamelCase : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
datasets.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_lowerCamelCase : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCamelCase : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_lowerCamelCase : Optional[Any] = {}
if data_args.train_file is not None:
_lowerCamelCase : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
_lowerCamelCase : Dict = data_args.validation_file
_lowerCamelCase : Any = data_args.train_file.split("." )[-1]
_lowerCamelCase : Tuple = load_dataset(
_lowerCAmelCase , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_lowerCamelCase : Union[str, Any] = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCamelCase : Any = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_lowerCamelCase : str = [F'ending{i}' for i in range(4 )]
_lowerCamelCase : List[str] = "sent1"
_lowerCamelCase : Any = "sent2"
if data_args.max_seq_length is None:
_lowerCamelCase : Dict = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_lowerCamelCase : Optional[int] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
_lowerCamelCase : Dict = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowerCAmelCase : Dict ):
_lowerCamelCase : Optional[int] = [[context] * 4 for context in examples[context_name]]
_lowerCamelCase : List[str] = examples[question_header_name]
_lowerCamelCase : Optional[int] = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(_lowerCAmelCase )
]
# Flatten out
_lowerCamelCase : Any = list(chain(*_lowerCAmelCase ) )
_lowerCamelCase : Tuple = list(chain(*_lowerCAmelCase ) )
# Tokenize
_lowerCamelCase : List[str] = tokenizer(
_lowerCAmelCase , _lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowerCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_lowerCamelCase : List[str] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_lowerCamelCase : Optional[int] = min(len(_lowerCAmelCase ) , data_args.max_train_samples )
_lowerCamelCase : Optional[Any] = train_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_lowerCamelCase : Tuple = train_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_lowerCamelCase : Union[str, Any] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_lowerCamelCase : Optional[int] = min(len(_lowerCAmelCase ) , data_args.max_eval_samples )
_lowerCamelCase : Tuple = eval_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_lowerCamelCase : Optional[int] = eval_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_lowerCamelCase : List[str] = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowerCAmelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(_lowerCAmelCase : List[str] ):
_lowerCamelCase , _lowerCamelCase : Dict = eval_predictions
_lowerCamelCase : List[Any] = np.argmax(_lowerCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_lowerCamelCase : Dict = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowerCAmelCase , data_collator=_lowerCAmelCase , compute_metrics=_lowerCAmelCase , )
# Training
if training_args.do_train:
_lowerCamelCase : int = None
if training_args.resume_from_checkpoint is not None:
_lowerCamelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCamelCase : str = last_checkpoint
_lowerCamelCase : List[Any] = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_lowerCamelCase : List[Any] = train_result.metrics
_lowerCamelCase : Dict = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
)
_lowerCamelCase : List[str] = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics("train" , _lowerCAmelCase )
trainer.save_metrics("train" , _lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Tuple = trainer.evaluate()
_lowerCamelCase : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase )
_lowerCamelCase : Optional[int] = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics("eval" , _lowerCAmelCase )
trainer.save_metrics("eval" , _lowerCAmelCase )
_lowerCamelCase : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCAmelCase )
else:
trainer.create_model_card(**_lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main() | 11 |
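# A toy illustration of the flatten/un-flatten trick used in
# `preprocess_function` and `DataCollatorForMultipleChoice` above
# (illustrative data, not part of the original script): each example carries
# 4 candidate endings, so 4 * batch sequences are tokenized at once and then
# regrouped into blocks of 4 for the multiple-choice head.
from itertools import chain
contexts = [["ctx A"] * 4, ["ctx B"] * 4]
endings = [["a1", "a2", "a3", "a4"], ["b1", "b2", "b3", "b4"]]
flat_first = list(chain(*contexts))  # length 8: batch of 2 * 4 endings
flat_second = list(chain(*endings))
# ...tokenizer(flat_first, flat_second)... then regroup in blocks of 4:
regrouped = [flat_second[i : i + 4] for i in range(0, len(flat_second), 4)]
assert regrouped == endings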
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |
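# A toy illustration of the two helper methods above (made-up ids, not real
# vocabulary values): ALBERT formats a single sequence as `[CLS] A [SEP]` and
# a pair as `[CLS] A [SEP] B [SEP]`, with token type ids 0 over the first
# segment and 1 over the second.
cls_id, sep_id = 2, 3
a, b = [10, 11], [20]
single = [cls_id] + a + [sep_id]  # [CLS] A [SEP]
pair = [cls_id] + a + [sep_id] + b + [sep_id]  # [CLS] A [SEP] B [SEP]
type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
assert len(pair) == len(type_ids) == 6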
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 |
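# A standalone sketch of the normalization performed in `preprocess_text`
# above (an assumed re-implementation with a deliberately narrower character
# set, not the tokenizer's exact tables): drop non-printing control
# characters, map exotic unicode spaces to a plain space, then apply NFC
# normalization.
import re
import unicodedata
NON_PRINTING = re.compile(r"[\x00-\x08\x0b-\x1f\x7f]")
WHITESPACES = {"\u00a0", "\u2009", "\u202f", "\u3000"}
def normalize(text: str) -> str:
    text = NON_PRINTING.sub("", text)
    text = "".join(" " if ch in WHITESPACES else ch for ch in text)
    return unicodedata.normalize("NFC", text)
print(normalize("caf\u0065\u0301\u00a0au lait"))  # -> 'café au lait', NFC-composed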
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored")
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]')
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False):
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(all_responses) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt) | 11 | 1 |
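# Sketch of the decoding pattern in convert_tokens_to_string above: SentencePiece
# pieces are buffered and decoded in runs, while special tokens pass through
# verbatim with a separating space. The default decoder below is a stand-in for
# the real sp_model.decode (the "\u2581" marker is SentencePiece's word boundary):
def join_with_specials(tokens, specials, decode=lambda ts: "".join(ts).replace("\u2581", " ")):
    out_string, buf, prev_is_special = "", [], False
    for token in tokens:
        if token in specials:
            if not prev_is_special:
                out_string += " "
            out_string += decode(buf) + token
            buf, prev_is_special = [], True
        else:
            buf.append(token)
            prev_is_special = False
    return out_string + decode(buf)

print(join_with_specials(["\u2581Hej", "!", "<|endoftext|>"], {"<|endoftext|>"}))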
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'upernet'
    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 11 |
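# The config above nests a backbone config and re-serializes it in to_dict(). A
# minimal sketch of that composition pattern, independent of transformers (all
# class names below are ours):
import copy

class TinyBackboneConfig:
    model_type = "resnet"
    def __init__(self, depth=50):
        self.depth = depth
    def to_dict(self):
        return {"model_type": self.model_type, "depth": self.depth}

class TinySegConfig:
    model_type = "upernet-like"
    def __init__(self, backbone_config=None, hidden_size=512):
        self.backbone_config = backbone_config or TinyBackboneConfig()
        self.hidden_size = hidden_size
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()  # nested dict
        output["model_type"] = self.__class__.model_type
        return output

print(TinySegConfig().to_dict())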
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 1 |
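# merge_lists above rebuilds the result by sorting the concatenation, which costs
# O((m+n)log(m+n)). Because both inputs are already sorted, a linear merge is
# enough; heapq.merge does exactly that lazily (plain lists used for brevity):
from heapq import merge

def merge_sorted(a, b):
    return list(merge(a, b))

print(merge_sorted([-11, -1, 0, 1, 3, 5, 7, 9], [-2, 0, 2, 3, 4, 6, 8, 10]))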
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
print(solution(20)) | 11 |
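# The closed form above uses linearity of expectation: E[#colours seen] =
# NUM_COLOURS * P(a given colour appears) = 7 * (1 - C(60, 20) / C(70, 20)).
# A quick Monte Carlo cross-check (the simulation itself is ours):
import math
import random

exact = 7 * (1 - math.comb(60, 20) / math.comb(70, 20))
balls = [colour for colour in range(7) for _ in range(10)]
trials = 20_000
simulated = sum(len(set(random.sample(balls, 20))) for _ in range(trials)) / trials
print(f"exact={exact:.9f}  simulated~{simulated:.3f}")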
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 1 |
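# The stub above routes attribute access through transformers' _LazyModule so the
# heavy tokenizer module is only imported on first use. The same idea can be
# expressed with module-level __getattr__ (PEP 562); this standalone sketch uses
# our own mapping and would live in a package __init__.py:
import importlib

_LAZY = {"Wav2Vec2PhonemeCTCTokenizer": ".tokenization_wav2vec2_phoneme"}

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")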
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ : Optional[int] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
lowerCAmelCase_ = PegasusConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = 'gelu'
def __init__( self : List[Any],__A : Optional[Any],__A : Tuple=1_3,__A : Any=7,__A : List[str]=True,__A : str=False,__A : int=9_9,__A : Any=3_2,__A : List[str]=5,__A : int=4,__A : int=3_7,__A : Dict=0.1,__A : str=0.1,__A : Union[str, Any]=2_0,__A : int=2,__A : str=1,__A : List[Any]=0,):
_lowerCamelCase : str = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Any = is_training
_lowerCamelCase : int = use_labels
_lowerCamelCase : Union[str, Any] = vocab_size
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : Optional[int] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Tuple = eos_token_id
_lowerCamelCase : Optional[Any] = pad_token_id
_lowerCamelCase : Tuple = bos_token_id
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length - 1],self.vocab_size ).clip(3,self.vocab_size )
_lowerCamelCase : Optional[int] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ),1 )
_lowerCamelCase : Dict = np.concatenate([input_ids, eos_tensor],axis=1 )
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = self.config_cls(
vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,eos_token_ids=[2],bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,decoder_start_token_id=self.pad_token_id,**self.config_updates,)
_lowerCamelCase : int = prepare_pegasus_inputs_dict(__A,__A,__A )
return config, inputs_dict
def lowerCamelCase_ ( self : Tuple,__A : List[str],__A : List[Any],__A : str ):
_lowerCamelCase : Union[str, Any] = 2_0
_lowerCamelCase : Tuple = model_class_name(__A )
_lowerCamelCase : Optional[int] = model.encode(inputs_dict["input_ids"] )
_lowerCamelCase , _lowerCamelCase : Optional[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_lowerCamelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0],__A,__A )
_lowerCamelCase : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length),dtype="i4" )
_lowerCamelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :],(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),)
_lowerCamelCase : int = model.decode(
decoder_input_ids[:, :-1],__A,decoder_attention_mask=__A,past_key_values=__A,decoder_position_ids=__A,)
_lowerCamelCase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]],dtype="i4" )
_lowerCamelCase : List[str] = model.decode(
decoder_input_ids[:, -1:],__A,decoder_attention_mask=__A,past_key_values=outputs_cache.past_key_values,decoder_position_ids=__A,)
_lowerCamelCase : str = model.decode(__A,__A )
_lowerCamelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3,msg=f'Max diff is {diff}' )
def lowerCamelCase_ ( self : int,__A : str,__A : Dict,__A : Optional[Any] ):
_lowerCamelCase : List[str] = 2_0
_lowerCamelCase : Tuple = model_class_name(__A )
_lowerCamelCase : Any = model.encode(inputs_dict["input_ids"] )
_lowerCamelCase , _lowerCamelCase : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
_lowerCamelCase : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
],axis=-1,)
_lowerCamelCase : str = model.init_cache(decoder_input_ids.shape[0],__A,__A )
_lowerCamelCase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :],(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),)
_lowerCamelCase : List[str] = model.decode(
decoder_input_ids[:, :-1],__A,decoder_attention_mask=__A,past_key_values=__A,decoder_position_ids=__A,)
_lowerCamelCase : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]],dtype="i4" )
_lowerCamelCase : str = model.decode(
decoder_input_ids[:, -1:],__A,past_key_values=outputs_cache.past_key_values,decoder_attention_mask=__A,decoder_position_ids=__A,)
_lowerCamelCase : List[Any] = model.decode(__A,__A,decoder_attention_mask=__A )
_lowerCamelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3,msg=f'Max diff is {diff}' )
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Tuple=None , ):
"""simple docstring"""
if attention_mask is None:
_lowerCamelCase : Dict = np.not_equal(_lowerCAmelCase , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_lowerCamelCase : Union[str, Any] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowerCAmelCase_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = FlaxPegasusModelTester(self )
_lowerCamelCase : Optional[Any] = ConfigTester(self,config_class=__A )
def lowerCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : str ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__A,__A,__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__A,__A,__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase : str = self._prepare_for_class(__A,__A )
_lowerCamelCase : List[str] = model_class(__A )
@jax.jit
def encode_jitted(__A : Union[str, Any],__A : int=None,**__A : List[Any] ):
return model.encode(input_ids=__A,attention_mask=__A )
with self.subTest("JIT Enabled" ):
_lowerCamelCase : Dict = encode_jitted(**__A ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_lowerCamelCase : Any = encode_jitted(**__A ).to_tuple()
self.assertEqual(len(__A ),len(__A ) )
for jitted_output, output in zip(__A,__A ):
self.assertEqual(jitted_output.shape,output.shape )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase : List[Any] = model_class(__A )
_lowerCamelCase : int = model.encode(inputs_dict["input_ids"],inputs_dict["attention_mask"] )
_lowerCamelCase : Optional[Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(__A : Tuple,__A : List[str],__A : Tuple ):
return model.decode(
decoder_input_ids=__A,decoder_attention_mask=__A,encoder_outputs=__A,)
with self.subTest("JIT Enabled" ):
_lowerCamelCase : Optional[int] = decode_jitted(**__A ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_lowerCamelCase : Optional[Any] = decode_jitted(**__A ).to_tuple()
self.assertEqual(len(__A ),len(__A ) )
for jitted_output, output in zip(__A,__A ):
self.assertEqual(jitted_output.shape,output.shape )
@slow
def lowerCamelCase_ ( self : List[str] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Optional[int] = model_class_name.from_pretrained("google/pegasus-large",from_pt=__A )
_lowerCamelCase : Any = np.ones((1, 1) )
_lowerCamelCase : List[str] = model(__A )
self.assertIsNotNone(__A )
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[str] = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
_lowerCamelCase : Dict = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
_lowerCamelCase : List[str] = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
_lowerCamelCase : Optional[int] = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
_lowerCamelCase : List[Any] = tokenizer(__A,return_tensors="np",truncation=__A,max_length=5_1_2,padding=__A )
_lowerCamelCase : Tuple = model.generate(**__A,num_beams=2 ).sequences
_lowerCamelCase : Dict = tokenizer.batch_decode(__A,skip_special_tokens=__A )
assert tgt_text == decoded | 11 |
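# A compact illustration of what the two cache tests above verify: decoding a
# sequence in one pass and token-by-token with a carried-over cache must give the
# same outputs. Everything below is a toy stand-in for the Flax model (assumed):
import numpy as np

def toy_decoder(ids, state=0.0):
    # "state" plays the role of past_key_values: the running sum of previous ids
    outs, s = [], state
    for t in ids:
        s += float(t)
        outs.append(s)
    return np.array(outs), s

ids = np.array([3, 1, 4, 1, 5])
full, _ = toy_decoder(ids)
incremental, state = [], 0.0
for t in ids:  # one token at a time, threading the cache through
    out, state = toy_decoder(np.array([t]), state)
    incremental.append(out[0])
diff = np.max(np.abs(full - np.array(incremental)))
assert diff < 1e-3, f"Max diff is {diff}"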
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 1 |
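# The slow integration test above checks a slice of the model output against
# hard-coded reference values with allclose under a small tolerance. The same
# pattern on a toy function (the reference numbers are computed for this demo,
# not taken from any model):
import numpy as np

def toy_model(x):
    return np.tanh(x @ np.full((3, 3), 0.1))

output = toy_model(np.ones((1, 3)))
expected_shape = (1, 3)
expected_slice = np.array([[0.29131261, 0.29131261, 0.29131261]])  # tanh(0.3)
assert output.shape == expected_shape
assert np.allclose(output[:, :3], expected_slice, atol=1e-4)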
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "cache"
_lowerCamelCase : List[str] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "cache"
_lowerCamelCase : Tuple = {"text": "string"}
_lowerCamelCase : Optional[Any] = features.copy() if features else default_expected_features
_lowerCamelCase : List[Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase : Any = TextDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = tmp_path / "cache"
_lowerCamelCase : List[Any] = {"text": "string"}
_lowerCamelCase : Dict = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] ):
"""simple docstring"""
if issubclass(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Dict = text_path
elif issubclass(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : int = [text_path]
_lowerCamelCase : List[Any] = tmp_path / "cache"
_lowerCamelCase : Optional[int] = {"text": "string"}
_lowerCamelCase : Union[str, Any] = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_dataset(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]=("train",) ):
"""simple docstring"""
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
for split in splits:
_lowerCamelCase : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = tmp_path / "cache"
_lowerCamelCase : Dict = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : Dict = TextDatasetReader({"train": text_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tmp_path / "cache"
    # The text builder reads a single "text" column, which defaults to the string dtype
_lowerCamelCase : Optional[int] = {"text": "string"}
_lowerCamelCase : Optional[int] = features.copy() if features else default_expected_features
_lowerCamelCase : str = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_lowerCamelCase : Tuple = TextDatasetReader({"train": text_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if split:
_lowerCamelCase : Tuple = {split: text_path}
else:
_lowerCamelCase : List[str] = "train"
_lowerCamelCase : List[str] = {"train": text_path, "test": text_path}
_lowerCamelCase : str = tmp_path / "cache"
_lowerCamelCase : int = {"text": "string"}
_lowerCamelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() ) | 11 |
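# The tests above drive datasets' text reader through path/feature/split matrices
# via the internal TextDatasetReader. Minimal usage of the same reader through the
# public API (the file contents here are ours):
from pathlib import Path
from datasets import load_dataset

path = Path("example.txt")
path.write_text("first line\nsecond line\nthird line\nfourth line\n")
ds = load_dataset("text", data_files=str(path), split="train")
assert ds.num_rows == 4 and ds.column_names == ["text"]
print(ds[0])  # {'text': 'first line'}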
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0])) | 11 | 1 |
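# shard_on_the_fly above starts a new shard whenever adding the next tensor would
# push the running byte count past max_shard_size, then emits an index mapping
# each weight name to its shard file. The packing decision in isolation (the
# sizes are made-up byte counts):
def plan_shards(weight_sizes, max_shard_size):
    shards, current, current_size = [], [], 0
    for name, size in weight_sizes.items():
        if current and current_size + size > max_shard_size:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += size
    if current:
        shards.append(current)
    return shards

print(plan_shards({"a": 6, "b": 5, "c": 4, "d": 8}, max_shard_size=10))
# -> [['a'], ['b', 'c'], ['d']]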
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    """simple docstring"""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 |
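# Note that the shuffle above swaps len(data) random pairs, which is close to but
# not exactly the classic algorithm. The textbook Fisher-Yates (Durstenfeld)
# variant walks the list once and provably yields a uniform permutation;
# random.shuffle implements the same idea:
import random

def fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick a swap partner in data[0..i]
        data[i], data[j] = data[j], data[i]
    return data

print(fisher_yates([0, 1, 2, 3, 4, 5, 6, 7]))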
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
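# The search above counts cuboids with integer shortest surface paths (Project
# Euler 86): for a cuboid a x b x M with M the longest side, unfolding the faces
# gives a shortest path of sqrt((a + b)^2 + M^2). A direct check of the classic
# 6 x 5 x 3 case, whose shortest path is exactly 10:
from math import sqrt

a, b, m = 5, 3, 6
assert sqrt((a + b) ** 2 + m ** 2) == 10.0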
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """simple docstring"""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''') | 11 |
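# An equivalent check without sorting: after case-folding, a string is an isogram
# exactly when no letter repeats, so comparing set size with string length works:
def is_isogram_set(string: str) -> bool:
    lowered = string.lower()
    return len(set(lowered)) == len(lowered)

assert is_isogram_set("Uncopyrightable")
assert not is_isogram_set("Isograms")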
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """simple docstring"""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 1 |
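# Cross-check for the converter above: Python's built-in bin() yields the same
# "0b"/"-0b" prefixed strings (decimal_to_binary refers to the function defined
# in the file above):
for n in (0, 7, -40, 2023):
    assert decimal_to_binary(n) == bin(n), n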
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 |
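# convert_state_dict above splits fused qkv projection matrices into separate
# q, k and v tensors by slicing thirds along the first dimension. The slicing
# rule in isolation (the shapes here are illustrative only):
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)  # the split is lossless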
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = """
 Examples:
 ```py
 >>> from PIL import Image
 >>> import torch
 >>> from diffusers import DiffusionPipeline
 >>> from diffusers.utils import export_to_gif, load_image

 >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

 >>> repo = "openai/shap-e-img2img"
 >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
 >>> pipe = pipe.to(device)

 >>> guidance_scale = 3.0
 >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
 >>> image = load_image(image_url).convert("RGB")

 >>> images = pipe(
 ...     image,
 ...     guidance_scale=guidance_scale,
 ...     num_inference_steps=64,
 ...     frame_size=256,
 ... ).images

 >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
 ```
"""
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
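        # guidance_scale > 1.0 switches on classifier-free guidance, which doubles the batch for a second, unconditional pass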
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                _lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
                # guided prediction: uncond + guidance_scale * (cond - uncond)
                _lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
UpperCAmelCase_ : str = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
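# lazy import structure: the configuration and processor are always importable; the modeling code is only exposed when torch is available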
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
    # canonical Fisher-Yates: walk backwards, swapping each element with a uniformly chosen earlier index
    for i in range(len(_lowerCAmelCase ) - 1 , 0 , -1 ):
        j = random.randint(0 , i )
        data[i], data[j] = data[j], data[i]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 1 |
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
assert masked_input.count("<mask>" ) == 1
_lowerCamelCase : List[str] = torch.tensor(tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) ).unsqueeze(0 ) # Batch size 1
_lowerCamelCase : Optional[Any] = model(_lowerCAmelCase )[0] # The last hidden-state is the first element of the output tuple
_lowerCamelCase : List[Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
_lowerCamelCase : List[Any] = logits[0, masked_index, :]
_lowerCamelCase : List[str] = logits.softmax(dim=0 )
_lowerCamelCase , _lowerCamelCase : List[Any] = prob.topk(k=_lowerCAmelCase , dim=0 )
_lowerCamelCase : Union[str, Any] = " ".join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(_lowerCAmelCase ) )] )
_lowerCamelCase : List[Any] = tokenizer.mask_token
_lowerCamelCase : List[Any] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
_lowerCamelCase : Tuple = predicted_token_bpe.replace("\u2581" , " " )
if " {0}".format(_lowerCAmelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(" {0}".format(_lowerCAmelCase ) , _lowerCAmelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(_lowerCAmelCase , _lowerCAmelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
UpperCAmelCase_ : str = CamembertTokenizer.from_pretrained('camembert-base')
UpperCAmelCase_ : Optional[Any] = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
UpperCAmelCase_ : Optional[int] = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3)) | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
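        # dummy encoder outputs so the causal-LM decoder path can be exercised with cross-attention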
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] ):
"""simple docstring"""
return len(set(_lowerCAmelCase ) ) == len(_lowerCAmelCase )
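# a sketch of the behaviour: A_([1, 2, 3]) -> True (all unique), A_([1, 2, 2]) -> False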
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),encoding="utf-8",check=__A,)
assert hasattr(self,"env" )
def lowerCamelCase_ ( self : Optional[Any],__A : int ):
_lowerCamelCase : Union[str, Any] = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
_lowerCamelCase : Tuple = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script,source_dir=self.env.test_path,role=self.env.role,image_uri=self.env.image_uri,base_job_name=__A,instance_count=__A,instance_type=self.instance_type,debugger_hook_config=__A,hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},metric_definitions=self.env.metric_definitions,distribution=__A,py_version="py36",)
def lowerCamelCase_ ( self : Dict,__A : Optional[Any] ):
TrainingJobAnalytics(__A ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def lowerCamelCase_ ( self : Tuple,__A : Dict ):
# create estimator
_lowerCamelCase : Any = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
_lowerCamelCase : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCamelCase : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds",9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json',"w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss},__A ) | 11 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
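        # two schedules: a power-law warmup 1 - (1 + step / inv_gamma) ** -power, or the classic
        # (1 + step) / (10 + step) ramp; the result is clamped into [min_decay, decay] below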
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
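                    # in-place EMA update: shadow = decay * shadow + (1 - decay) * param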
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 1 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Any = ""
for i in table:
res += inp[i - 1]
return res
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
return data[1:] + data[0]
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = ""
for i in range(len(_lowerCAmelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = int("0b" + data[0] + data[-1] , 2 )
_lowerCamelCase : str = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = message[:4]
_lowerCamelCase : Optional[Any] = message[4:]
_lowerCamelCase : List[Any] = apply_table(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Any = xor(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Dict = apply_sbox(_lowerCAmelCase , temp[:4] ) # noqa: E741
_lowerCamelCase : List[Any] = apply_sbox(_lowerCAmelCase , temp[4:] )
_lowerCamelCase : Any = "0" * (2 - len(_lowerCAmelCase )) + l # noqa: E741
_lowerCamelCase : Union[str, Any] = "0" * (2 - len(_lowerCAmelCase )) + r
_lowerCamelCase : str = apply_table(l + r , _lowerCAmelCase )
_lowerCamelCase : List[Any] = xor(_lowerCAmelCase , _lowerCAmelCase )
return temp + right
if __name__ == "__main__":
UpperCAmelCase_ : int = input('Enter 10 bit key: ')
UpperCAmelCase_ : Union[str, Any] = input('Enter 8 bit message: ')
UpperCAmelCase_ : Any = [6, 3, 7, 4, 8, 5, 10, 9]
UpperCAmelCase_ : List[str] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
UpperCAmelCase_ : List[Any] = [2, 4, 3, 1]
UpperCAmelCase_ : Optional[int] = [2, 6, 3, 1, 4, 8, 5, 7]
UpperCAmelCase_ : int = [4, 1, 3, 5, 7, 2, 8, 6]
UpperCAmelCase_ : Dict = [4, 1, 2, 3, 2, 3, 4, 1]
UpperCAmelCase_ : Tuple = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
UpperCAmelCase_ : Tuple = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
UpperCAmelCase_ : Union[str, Any] = apply_table(key, paa_table)
UpperCAmelCase_ : Tuple = temp[:5]
UpperCAmelCase_ : List[str] = temp[5:]
UpperCAmelCase_ : str = left_shift(left)
UpperCAmelCase_ : int = left_shift(right)
UpperCAmelCase_ : List[str] = apply_table(left + right, pa_table)
UpperCAmelCase_ : int = left_shift(left)
UpperCAmelCase_ : Any = left_shift(right)
UpperCAmelCase_ : Dict = left_shift(left)
UpperCAmelCase_ : Any = left_shift(right)
UpperCAmelCase_ : List[str] = apply_table(left + right, pa_table)
# encryption
UpperCAmelCase_ : List[Any] = apply_table(message, IP)
UpperCAmelCase_ : Any = function(expansion, sa, sa, keya, temp)
UpperCAmelCase_ : List[Any] = temp[4:] + temp[:4]
UpperCAmelCase_ : Dict = function(expansion, sa, sa, keya, temp)
UpperCAmelCase_ : Any = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
UpperCAmelCase_ : Optional[Any] = apply_table(CT, IP)
UpperCAmelCase_ : Tuple = function(expansion, sa, sa, keya, temp)
UpperCAmelCase_ : List[Any] = temp[4:] + temp[:4]
UpperCAmelCase_ : List[str] = function(expansion, sa, sa, keya, temp)
UpperCAmelCase_ : List[str] = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT) | 11 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 1 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def A_ ( ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = 9, 14 # noqa: F841
_lowerCamelCase : List[str] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_lowerCamelCase : Optional[Any] = defaultdict(_lowerCAmelCase )
for nodea, nodea, cost in edges:
        adjacency[nodea].append([nodea, cost] )
        adjacency[nodea].append([nodea, cost] )
_lowerCamelCase : str = mst(_lowerCAmelCase )
_lowerCamelCase : List[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_lowerCamelCase : List[Any] = tuple(answer[:2] )
_lowerCamelCase : Dict = tuple(edge[::-1] )
assert edge in result or reverse in result | 11 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
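            # in_proj stores q, k and v stacked along dim 0; slice the (3 * dim, dim) matrix into thirds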
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
    default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['image_processor', 'feature_extractor']
lowerCAmelCase_ = 'TvltImageProcessor'
lowerCAmelCase_ = 'TvltFeatureExtractor'
def __init__( self : Any,__A : List[Any],__A : str ):
super().__init__(image_processor=__A,feature_extractor=__A )
_lowerCamelCase : Dict = image_processor
_lowerCamelCase : List[str] = feature_extractor
def __call__( self : List[Any],__A : Optional[int]=None,__A : List[Any]=None,__A : List[str]=None,__A : int=None,__A : Tuple=False,__A : Dict=False,*__A : List[Any],**__A : Optional[Any],):
if images is None and audio is None:
raise ValueError("You need to specify either an `images` or `audio` input to process." )
_lowerCamelCase : List[Any] = None
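        # each modality is preprocessed independently and the resulting dicts are merged below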
if images is not None:
_lowerCamelCase : Optional[int] = self.image_processor(__A,mask_pixel=__A,*__A,**__A )
if images_mixed is not None:
_lowerCamelCase : Dict = self.image_processor(__A,is_mixed=__A,*__A,**__A )
if audio is not None:
_lowerCamelCase : Optional[int] = self.feature_extractor(
__A,*__A,sampling_rate=__A,mask_audio=__A,**__A )
_lowerCamelCase : Dict = {}
if audio is not None:
output_dict.update(__A )
if images is not None:
output_dict.update(__A )
if images_mixed_dict is not None:
output_dict.update(__A )
return output_dict
@property
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = self.image_processor.model_input_names
_lowerCamelCase : Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) ) | 11 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Union[str, Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : List[str] = 16
_lowerCamelCase : Union[str, Any] = [5, 11, 17, 23]
_lowerCamelCase : int = [256, 512, 1024, 1024]
_lowerCamelCase : Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowerCamelCase : Dict = 768
_lowerCamelCase : List[Any] = [1, 1, 1, 0.5]
_lowerCamelCase : Optional[int] = [256, 512, 768, 768]
_lowerCamelCase : str = 150
_lowerCamelCase : List[Any] = 16
_lowerCamelCase : Optional[int] = (1, 384, 384)
_lowerCamelCase : int = False
_lowerCamelCase : Tuple = "project"
if "ade" in checkpoint_url:
_lowerCamelCase : Dict = True
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[int] = [1, 1, 1, 0.5]
_lowerCamelCase : Dict = 150
_lowerCamelCase : Dict = 16
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Optional[int] = "ade20k-id2label.json"
_lowerCamelCase : List[Any] = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Tuple = idalabel
_lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[Any] = [1, 150, 480, 480]
return config, expected_shape
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowerCamelCase : str = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
_lowerCamelCase : List[str] = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
_lowerCamelCase : str = name.replace("patch_embed" , "" )
if "pos_embed" in name:
_lowerCamelCase : Optional[int] = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
_lowerCamelCase : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
_lowerCamelCase : Union[str, Any] = name.replace("proj" , "projection" )
if "blocks" in name:
_lowerCamelCase : Dict = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
_lowerCamelCase : Optional[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_lowerCamelCase : List[str] = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
_lowerCamelCase : Union[str, Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
_lowerCamelCase : Tuple = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
_lowerCamelCase : Any = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
_lowerCamelCase : Dict = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
_lowerCamelCase : List[Any] = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
_lowerCamelCase : str = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
_lowerCamelCase : List[Any] = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
_lowerCamelCase : int = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
_lowerCamelCase : Union[str, Any] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowerCamelCase : List[str] = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
_lowerCamelCase : Optional[Any] = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
_lowerCamelCase : List[Any] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
_lowerCamelCase : Tuple = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
_lowerCamelCase : Union[str, Any] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
_lowerCamelCase : Optional[int] = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowerCamelCase : List[str] = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowerCamelCase : int = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowerCamelCase : Dict = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowerCamelCase : int = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowerCamelCase : Tuple = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
_lowerCamelCase : List[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
_lowerCamelCase : Any = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
_lowerCamelCase : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
_lowerCamelCase : Optional[int] = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
_lowerCamelCase : str = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
_lowerCamelCase : Optional[int] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
_lowerCamelCase : List[Any] = name.replace("pretrained" , "dpt" )
if "bn" in name:
_lowerCamelCase : Union[str, Any] = name.replace("bn" , "batch_norm" )
if "head" in name:
_lowerCamelCase : List[Any] = name.replace("head" , "head.head" )
if "encoder.norm" in name:
_lowerCamelCase : Optional[Any] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
_lowerCamelCase : Union[str, Any] = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
_lowerCamelCase : int = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
_lowerCamelCase : Dict = name.replace(".." , "." )
if "stem.conv" in name:
_lowerCamelCase : Optional[int] = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
_lowerCamelCase : Optional[Any] = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
_lowerCamelCase : Optional[int] = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
_lowerCamelCase : Tuple = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
_lowerCamelCase : List[Any] = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
_lowerCamelCase : List[str] = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
_lowerCamelCase : int = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
_lowerCamelCase : int = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Any = in_proj_weight[: config.hidden_size, :]
_lowerCamelCase : int = in_proj_bias[: config.hidden_size]
_lowerCamelCase : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Dict = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : List[str] = get_dpt_config(_lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(_lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
_lowerCamelCase : List[Any] = state_dict.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
_lowerCamelCase : Any = DPTForSemanticSegmentation(_lowerCAmelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Check outputs on an image
_lowerCamelCase : Dict = 480 if "ade" in checkpoint_url else 384
_lowerCamelCase : Union[str, Any] = DPTImageProcessor(size=_lowerCAmelCase )
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : Any = image_processor(_lowerCAmelCase , return_tensors="pt" )
# forward pass
_lowerCamelCase : Dict = model(**_lowerCAmelCase ).logits if "ade" in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth
if show_prediction:
_lowerCamelCase : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=_lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(exponent )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
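# Hedged sketch of the perfection test used above: k = 2**m * (2**m - 1) makes
# sqrt(4*k + 1)/2 + 1/2 an exact power of two, so the log2 check yields an
# integer exponent for k in {2, 12, 56, ...}. Values chosen for illustration.
def _perfect_partition_sketch():
    for m in (1, 2, 3):
        k = 2**m * (2**m - 1)
        exponent = math.log2(math.sqrt(4 * k + 1) / 2 + 1 / 2)
        assert exponent == int(exponent)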
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
UpperCAmelCase_ : Tuple = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
lowerCAmelCase_ = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
lowerCAmelCase_ = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'A csv or a json file containing the training data.'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'A csv or a json file containing the validation data.'} )
lowerCAmelCase_ = field(default=A , metadata={'help': 'A csv or a json file containing the test data.'} )
def lowerCamelCase_ ( self : Union[str, Any] ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
else:
_lowerCamelCase : Union[str, Any] = self.train_file.split("." )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_lowerCamelCase : List[str] = self.validation_file.split("." )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase_ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase_ = field(
default=A , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_lowerCamelCase : Any = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
datasets.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_lowerCamelCase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_lowerCamelCase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_lowerCamelCase : Dict = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_lowerCamelCase : Union[str, Any] = data_args.train_file.split("." )[-1]
_lowerCamelCase : Union[str, Any] = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_lowerCamelCase : Tuple = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
_lowerCamelCase : int = load_dataset("csv" , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_lowerCamelCase : List[str] = load_dataset("json" , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_lowerCamelCase : Tuple = raw_datasets["train"].features["label"].names
_lowerCamelCase : Any = len(_lowerCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_lowerCamelCase : Dict = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=_lowerCAmelCase , )
_lowerCamelCase : Tuple = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_lowerCamelCase : Tuple = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_lowerCamelCase : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_lowerCamelCase : List[Any] = {"Refused": 0, "Entailed": 1}
_lowerCamelCase : Union[str, Any] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
_lowerCamelCase : Optional[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(_lowerCAmelCase : Optional[Any] ):
# Tokenize the texts
def _convert_table_text_to_pandas(_lowerCAmelCase : List[str] ):
_lowerCamelCase : List[Any] = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
_lowerCamelCase : str = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
_lowerCamelCase : List[str] = examples["statement"]
_lowerCamelCase : List[str] = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
_lowerCamelCase : List[str] = tokenizer(_lowerCAmelCase , _lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
_lowerCamelCase : Optional[Any] = raw_datasets.map(
_lowerCAmelCase , batched=_lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_lowerCamelCase : Any = raw_datasets["train"]
if data_args.max_train_samples is not None:
_lowerCamelCase : Union[str, Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_lowerCamelCase : str = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_lowerCamelCase : Optional[Any] = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
_lowerCamelCase : Optional[int] = raw_datasets["test"]
if data_args.max_predict_samples is not None:
_lowerCamelCase : Optional[int] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(_lowerCAmelCase ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_lowerCAmelCase : EvalPrediction ):
_lowerCamelCase : Optional[Any] = p.predictions[0] if isinstance(p.predictions , _lowerCAmelCase ) else p.predictions
_lowerCamelCase : Any = np.argmax(_lowerCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_lowerCamelCase : Optional[Any] = default_data_collator
elif training_args.fp16:
_lowerCamelCase : List[Any] = DataCollatorWithPadding(_lowerCAmelCase , pad_to_multiple_of=8 )
else:
_lowerCamelCase : Dict = None
# Initialize our Trainer
_lowerCamelCase : Dict = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , data_collator=_lowerCAmelCase , )
# Training
if training_args.do_train:
_lowerCamelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
_lowerCamelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCamelCase : str = last_checkpoint
_lowerCamelCase : List[str] = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
_lowerCamelCase : Any = train_result.metrics
_lowerCamelCase : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
)
_lowerCamelCase : List[Any] = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , _lowerCAmelCase )
trainer.save_metrics("train" , _lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowerCamelCase : Tuple = trainer.evaluate(eval_dataset=_lowerCAmelCase )
_lowerCamelCase : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase )
_lowerCamelCase : Tuple = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics("eval" , _lowerCAmelCase )
trainer.save_metrics("eval" , _lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_lowerCamelCase : Dict = predict_dataset.remove_columns("label" )
_lowerCamelCase : int = trainer.predict(_lowerCAmelCase , metric_key_prefix="predict" ).predictions
_lowerCamelCase : Optional[int] = np.argmax(_lowerCAmelCase , axis=1 )
_lowerCamelCase : str = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(_lowerCAmelCase , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = label_list[item]
writer.write(F'{index}\t{item}\n' )
_lowerCamelCase : Union[str, Any] = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCAmelCase )
else:
trainer.create_model_card(**_lowerCAmelCase )
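# Hedged sketch of the "#"-delimited table format assumed by
# _convert_table_text_to_pandas inside the preprocessing closure above; the
# sample table is made up for illustration.
def _table_text_sketch():
    import pandas as pd

    table_text = "col1#col2\na#1\nb#2"
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
    assert list(table.columns) == ["col1", "col2"]
    assert len(table) == 2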
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
main()
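# Hedged sketch of the compute_metrics logic defined in main() above: argmax
# over the logits, then mean agreement with the labels. Toy inputs only.
def _accuracy_sketch():
    import numpy as np

    logits = np.array([[0.1, 0.9], [0.8, 0.2]])
    labels = np.array([1, 1])
    preds = np.argmax(logits, axis=1)
    assert (preds == labels).astype(np.float64).mean() == 0.5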
if __name__ == "__main__":
main()
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A )
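# Hedged, transformers-independent sketch of the shim pattern above: a class
# kept only for backward compatibility warns in __init__ and otherwise defers
# entirely to its maintained parent. Names are made up for illustration.
def _deprecation_shim_sketch():
    import warnings

    class Parent:
        def __init__(self, value=None):
            self.value = value

    class DeprecatedChild(Parent):
        def __init__(self, value=None):
            warnings.warn("DeprecatedChild is deprecated; use Parent instead.", FutureWarning)
            super().__init__(value=value)

    return DeprecatedChild(1).value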
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = DebertaTokenizer
lowerCAmelCase_ = True
lowerCAmelCase_ = DebertaTokenizerFast
def lowerCamelCase_ ( self : Optional[Any] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : int = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"[UNK]",
]
_lowerCamelCase : str = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : List[Any] = {"unk_token": "[UNK]"}
_lowerCamelCase : List[Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Any = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : str,**__A : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,__A : Optional[Any] ):
_lowerCamelCase : Optional[Any] = "lower newer"
_lowerCamelCase : Any = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.get_tokenizer()
_lowerCamelCase : Any = "lower newer"
_lowerCamelCase : Optional[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
_lowerCamelCase : Dict = tokens + [tokenizer.unk_token]
_lowerCamelCase : List[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : List[Any] = tokenizer("Hello","World" )
_lowerCamelCase : Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd["token_type_ids"],__A )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
_lowerCamelCase : int = tokenizer.encode("sequence builders",add_special_tokens=__A )
_lowerCamelCase : int = tokenizer.encode("multi-sequence build",add_special_tokens=__A )
_lowerCamelCase : Any = tokenizer.encode(
"sequence builders",add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = tokenizer.encode(
"sequence builders","multi-sequence build",add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__A )
_lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__A,__A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[int] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_lowerCamelCase : List[str] = tokenizer_class.from_pretrained("microsoft/deberta-base" )
_lowerCamelCase : str = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
_lowerCamelCase : Dict = tokenizer(__A,padding=__A )
_lowerCamelCase : Optional[int] = [tokenizer.decode(__A,skip_special_tokens=__A ) for seq in encoding["input_ids"]]
# fmt: off
_lowerCamelCase : Tuple = {
"input_ids": [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_lowerCamelCase : str = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
self.assertDictEqual(encoding.data,__A )
for expected, decoded in zip(__A,__A ):
self.assertEqual(__A,__A )
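# Hedged sketch: "\u0120" in the toy vocab above is the byte-level BPE marker
# for a leading space (it renders as "Ġ"), the GPT-2 convention that DeBERTa's
# tokenizer reuses, e.g. "\u0120lowest" stands for " lowest".
def _space_marker_sketch():
    assert "\u0120" == "Ġ"
    assert "\u0120lowest"[0] == chr(0x120)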
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
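# Hedged sketch of the trimming rule at the end of the conversation builder
# above: when the encoded history exceeds model_max_length, only the most
# recent tokens are kept. Toy ids and limit.
def _left_truncation_sketch():
    model_max_length = 4
    input_ids = [10, 11, 12, 13, 14, 15]
    trimmed = input_ids[-model_max_length:]
    assert trimmed == [12, 13, 14, 15]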
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = (DPMSolverSDEScheduler,)
lowerCAmelCase_ = 10
def lowerCamelCase_ ( self : str,**__A : List[Any] ):
_lowerCamelCase : Optional[int] = {
"num_train_timesteps": 1_1_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**__A )
return config
def lowerCamelCase_ ( self : List[str] ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001],[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__A,beta_end=__A )
def lowerCamelCase_ ( self : Tuple ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__A )
def lowerCamelCase_ ( self : Optional[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : str = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCamelCase : Tuple = self.dummy_model()
_lowerCamelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Dict = sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__A,__A )
_lowerCamelCase : Tuple = model(__A,__A )
_lowerCamelCase : Tuple = scheduler.step(__A,__A,__A )
_lowerCamelCase : List[str] = output.prev_sample
_lowerCamelCase : List[Any] = torch.sum(torch.abs(__A ) )
_lowerCamelCase : str = torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Any = self.scheduler_classes[0]
_lowerCamelCase : Tuple = self.get_scheduler_config(prediction_type="v_prediction" )
_lowerCamelCase : Union[str, Any] = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCamelCase : Tuple = sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCamelCase : str = scheduler.scale_model_input(__A,__A )
_lowerCamelCase : Dict = model(__A,__A )
_lowerCamelCase : Dict = scheduler.step(__A,__A,__A )
_lowerCamelCase : Optional[Any] = output.prev_sample
_lowerCamelCase : Dict = torch.sum(torch.abs(__A ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps,device=__A )
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : int = self.dummy_sample_deter.to(__A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCamelCase : Dict = scheduler.scale_model_input(__A,__A )
_lowerCamelCase : Dict = model(__A,__A )
_lowerCamelCase : int = scheduler.step(__A,__A,__A )
_lowerCamelCase : str = output.prev_sample
_lowerCamelCase : int = torch.sum(torch.abs(__A ) )
_lowerCamelCase : str = torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Dict = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__A,use_karras_sigmas=__A )
scheduler.set_timesteps(self.num_inference_steps,device=__A )
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter.to(__A ) * scheduler.init_noise_sigma
_lowerCamelCase : Optional[Any] = sample.to(__A )
for t in scheduler.timesteps:
_lowerCamelCase : str = scheduler.scale_model_input(__A,__A )
_lowerCamelCase : Tuple = model(__A,__A )
_lowerCamelCase : Union[str, Any] = scheduler.step(__A,__A,__A )
_lowerCamelCase : Tuple = output.prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__A ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(__A ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
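# Hedged sketch of the sampling pattern every test above exercises: scale the
# sample for the current timestep, predict, then step the scheduler. The
# scheduler/model/sample are passed in rather than constructed here.
def _denoising_loop_sketch(scheduler, model, sample):
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        model_output = model(scaled, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample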
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
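# Hedged sanity check (assuming the routine above is callable as
# bisection(a, b), as the __main__ block below does): the positive root of
# 10 - x**2 is sqrt(10) ~= 3.162, and the 0.01 stopping width bounds the
# error of the returned midpoint.
def _bisection_check_sketch():
    import math

    assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01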
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
'''simple docstring'''
import numpy as np
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = int(np.ceil((x_end - xa) / h ) )
_lowerCamelCase : List[Any] = np.zeros((n + 1,) )
_lowerCamelCase : Any = ya
_lowerCamelCase : List[str] = xa
for k in range(_lowerCAmelCase ):
_lowerCamelCase : Tuple = f(_lowerCAmelCase , y[k] )
_lowerCamelCase : Optional[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
_lowerCamelCase : Union[str, Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
_lowerCamelCase : Optional[int] = f(x + h , y[k] + h * ka )
_lowerCamelCase : Dict = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
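# Hedged sketch (pass the RK4 routine above in as `solver`; its signature is
# assumed to be (f, y0, x0, x_end, h)): integrating y' = y from x=0 to x=1
# with step 0.01 should approximate e = 2.71828... to well under 1e-6.
def _rk4_sketch(solver):
    y = solver(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
    assert abs(y[-1] - np.e) < 1e-6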
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
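# Hedged sketch of the check_uniques trick above: each hash is consumed from
# the set on first sight, so only the first occurrence of a duplicate survives
# the filter. Toy hashes for illustration.
def _dedup_sketch():
    uniques = {"h1", "h2"}
    kept = []
    for h in ["h1", "h1", "h2"]:
        if h in uniques:
            uniques.remove(h)
            kept.append(h)
    assert kept == ["h1", "h2"]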
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCAmelCase_ : Any = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple ):
"""simple docstring"""
return max(metric_fn(_lowerCAmelCase , _lowerCAmelCase ) for gt in ground_truths )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Any = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCamelCase : Union[str, Any] = []
if args.gold_data_mode == "qa":
_lowerCamelCase : Optional[Any] = pd.read_csv(_lowerCAmelCase , sep="\t" , header=_lowerCAmelCase )
for answer_list in data[1]:
_lowerCamelCase : Union[str, Any] = ast.literal_eval(_lowerCAmelCase )
answers.append(_lowerCAmelCase )
else:
_lowerCamelCase : str = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCamelCase : List[Any] = [[reference] for reference in references]
_lowerCamelCase : Union[str, Any] = 0
for prediction, ground_truths in zip(_lowerCAmelCase , _lowerCAmelCase ):
total += 1
em += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
fa += metric_max_over_ground_truths(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Dict = 1_0_0.0 * em / total
_lowerCamelCase : Dict = 1_0_0.0 * fa / total
logger.info(F'F1: {fa:.2f}' )
logger.info(F'EM: {em:.2f}' )
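# Hedged sketch of the max-over-references pattern used above, with a trivial
# normalised exact-match metric standing in for the real exact_match_score.
def _metric_max_sketch():
    def em(prediction, reference):
        return float(prediction.strip().lower() == reference.strip().lower())

    assert max(em("Paris", gt) for gt in ["paris", "Lyon"]) == 1.0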
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : int = args.k
_lowerCamelCase : Optional[Any] = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCamelCase : Union[str, Any] = [line.strip() for line in open(_lowerCAmelCase , "r" ).readlines()]
_lowerCamelCase : Optional[Any] = 0
for hypo, reference in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = set(hypo.split("\t" )[:k] )
_lowerCamelCase : List[Any] = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_lowerCamelCase : List[Any] = 1_0_0.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
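# Hedged sketch of the precision@k computation above: the fraction of the
# top-k retrieved titles that also appear in the reference provenance.
def _precision_at_k_sketch():
    k = 2
    hypo_provenance = {"Doc A", "Doc B"}
    ref_provenance = {"Doc B", "Doc C"}
    assert len(hypo_provenance & ref_provenance) / k == 0.5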
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Any ):
"""simple docstring"""
def strip_title(_lowerCAmelCase : List[Any] ):
if title.startswith("\"" ):
_lowerCamelCase : Dict = title[1:]
if title.endswith("\"" ):
_lowerCamelCase : List[str] = title[:-1]
return title
_lowerCamelCase : str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , )["input_ids"].to(args.device )
_lowerCamelCase : Any = rag_model.rag.question_encoder(_lowerCAmelCase )
_lowerCamelCase : str = question_enc_outputs[0]
_lowerCamelCase : List[str] = rag_model.retriever(
_lowerCAmelCase , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
_lowerCamelCase : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_lowerCamelCase : str = []
for docs in all_docs:
_lowerCamelCase : Union[str, Any] = [strip_title(_lowerCAmelCase ) for title in docs["title"]]
provenance_strings.append("\t".join(_lowerCAmelCase ) )
return provenance_strings
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with torch.no_grad():
_lowerCamelCase : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
_lowerCAmelCase , return_tensors="pt" , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )
_lowerCamelCase : Optional[int] = inputs_dict.input_ids.to(args.device )
_lowerCamelCase : Any = inputs_dict.attention_mask.to(args.device )
_lowerCamelCase : Dict = rag_model.generate( # rag_model overwrites generate
_lowerCAmelCase , attention_mask=_lowerCAmelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_lowerCAmelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_lowerCamelCase : str = rag_model.retriever.generator_tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
if args.print_predictions:
for q, a in zip(_lowerCAmelCase , _lowerCAmelCase ):
logger.info("Q: {} - A: {}".format(_lowerCAmelCase , _lowerCAmelCase ) )
return answers
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=_lowerCAmelCase , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=_lowerCAmelCase , choices=["exact", "compressed", "legacy"] , type=_lowerCAmelCase , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=_lowerCAmelCase , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=_lowerCAmelCase , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=_lowerCAmelCase , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=_lowerCAmelCase , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=_lowerCAmelCase , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=_lowerCAmelCase , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=_lowerCAmelCase , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=_lowerCAmelCase , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=_lowerCAmelCase , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
_lowerCamelCase : Union[str, Any] = parser.parse_args()
_lowerCamelCase : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
if args.model_type is None:
_lowerCamelCase : List[Any] = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
_lowerCamelCase : Any = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
_lowerCamelCase : Dict = args.n_docs
if args.index_name is not None:
_lowerCamelCase : List[Any] = args.index_name
if args.index_path is not None:
_lowerCamelCase : Union[str, Any] = args.index_path
else:
_lowerCamelCase : Optional[Any] = BartForConditionalGeneration
_lowerCamelCase : Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , _lowerCAmelCase )
_lowerCamelCase : str = get_scores if args.eval_mode == "e2e" else get_precision_at_k
_lowerCamelCase : str = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(_lowerCAmelCase ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
_lowerCamelCase : List[Any] = RagRetriever.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = model_class.from_pretrained(_lowerCAmelCase , retriever=_lowerCAmelCase , **_lowerCAmelCase )
model.retriever.init_retrieval()
else:
_lowerCamelCase : str = model_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
_lowerCamelCase : Optional[Any] = []
for line in tqdm(_lowerCAmelCase ):
questions.append(line.strip() )
if len(_lowerCAmelCase ) == args.eval_batch_size:
_lowerCamelCase : Optional[Any] = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write("\n".join(_lowerCAmelCase ) + "\n" )
preds_file.flush()
_lowerCamelCase : str = []
if len(_lowerCAmelCase ) > 0:
_lowerCamelCase : Optional[int] = evaluate_batch_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
preds_file.write("\n".join(_lowerCAmelCase ) )
preds_file.flush()
score_fn(_lowerCAmelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCAmelCase_ : Any = get_args()
main(args)
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
        _lowerCamelCase : Dict = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
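        # Model-specific input handling: distilbert-style models take no token_type_ids,
        # while XLNet/XLM additionally expect cls_index and p_mask.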
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : str = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
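# Lazy import map: the torch-backed modeling classes below are only
# materialized on first attribute access via _LazyModule.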
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
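        # The slow (sentencepiece) vocabulary can only be re-saved when the
        # original spiece.model file is available.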
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
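        # ALBERT input format: [CLS] A [SEP] for single sequences and
        # [CLS] A [SEP] B [SEP] for sequence pairs.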
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
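        # Token type ids: 0 for the first sequence (including [CLS] and its [SEP]),
        # 1 for the second sequence and its trailing [SEP].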
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Union[str, Any],__A : List[Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"],model_result["ss"] ):
_lowerCamelCase : int = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = "sshleifer/tiny-gpt2"
_lowerCamelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : List[str] = PyTorchBenchmark(__A )
_lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = "sgugger/tiny-distilbert-classification"
_lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,only_pretrain_model=__A,)
_lowerCamelCase : Optional[Any] = PyTorchBenchmark(__A )
_lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : str = "sshleifer/tiny-gpt2"
_lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,torchscript=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : int = PyTorchBenchmark(__A )
_lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu","Cant do half precision" )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[Any] = "sshleifer/tiny-gpt2"
_lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID],training=__A,inference=__A,fp16=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : Optional[Any] = PyTorchBenchmark(__A )
_lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = "sshleifer/tiny-gpt2"
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(__A )
# set architectures equal to `None`
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : Union[str, Any] = PyTorchBenchmark(__A,configs=[config] )
_lowerCamelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = "sshleifer/tiny-gpt2"
_lowerCamelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : List[str] = PyTorchBenchmark(__A )
_lowerCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu","Can't do half precision" )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : str = "sshleifer/tiny-gpt2"
_lowerCamelCase : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],fp16=__A,multi_process=__A,)
_lowerCamelCase : int = PyTorchBenchmark(__A )
_lowerCamelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[Any] = "sshleifer/tiny-gpt2"
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__A )
_lowerCamelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : List[str] = PyTorchBenchmark(__A,configs=[config] )
_lowerCamelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Dict = "sshleifer/tinier_bart"
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(__A )
_lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : Tuple = PyTorchBenchmark(__A,configs=[config] )
_lowerCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = "sshleifer/tiny-gpt2"
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__A )
_lowerCamelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : Dict = PyTorchBenchmark(__A,configs=[config] )
_lowerCamelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : int = "sshleifer/tinier_bart"
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(__A )
_lowerCamelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],multi_process=__A,)
_lowerCamelCase : Dict = PyTorchBenchmark(__A,configs=[config] )
_lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,save_to_csv=__A,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(__A,"inf_time.csv" ),train_memory_csv_file=os.path.join(__A,"train_mem.csv" ),inference_memory_csv_file=os.path.join(__A,"inf_mem.csv" ),train_time_csv_file=os.path.join(__A,"train_time.csv" ),env_info_csv_file=os.path.join(__A,"env.csv" ),multi_process=__A,)
_lowerCamelCase : List[Any] = PyTorchBenchmark(__A )
benchmark.run()
self.assertTrue(Path(os.path.join(__A,"inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__A,"train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__A,"inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__A,"train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__A,"env.csv" ) ).exists() )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__A : Union[str, Any] ):
self.assertTrue(hasattr(__A,"sequential" ) )
self.assertTrue(hasattr(__A,"cumulative" ) )
self.assertTrue(hasattr(__A,"current" ) )
self.assertTrue(hasattr(__A,"total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=__A,inference=__A,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(__A,"log.txt" ),log_print=__A,trace_memory_line_by_line=__A,multi_process=__A,)
_lowerCamelCase : List[Any] = PyTorchBenchmark(__A )
_lowerCamelCase : List[str] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__A,"log.txt" ) ).exists() ) | 11 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
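        # NOTE: the whitespace set above holds assorted Unicode space and zero-width
        # characters; several of them render as blank or empty strings in plain text.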
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
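    # Tokenization: normalize the raw text, then let the sentencepiece model
    # split it into string pieces.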
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
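        # Fast encoding path: preprocess, run sentencepiece directly to ids, and
        # optionally wrap the result in a torch tensor.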
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 1 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ : List[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
UpperCAmelCase_ : Dict = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
UpperCAmelCase_ : Tuple = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
UpperCAmelCase_ : List[str] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
UpperCAmelCase_ : int = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ = DPRContextEncoderTokenizer
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ = DPRQuestionEncoderTokenizer
UpperCAmelCase_ : Optional[int] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
UpperCAmelCase_ : Union[str, Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
UpperCAmelCase_ : List[Any] = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(A )
class UpperCAmelCase__ :
def __call__( self : str,__A : Dict,__A : Optional[str] = None,__A : Optional[str] = None,__A : Union[bool, str] = False,__A : Union[bool, str] = False,__A : Optional[int] = None,__A : Optional[Union[str, TensorType]] = None,__A : Optional[bool] = None,**__A : Optional[int],):
if titles is None and texts is None:
return super().__call__(
__A,padding=__A,truncation=__A,max_length=__A,return_tensors=__A,return_attention_mask=__A,**__A,)
elif titles is None or texts is None:
_lowerCamelCase : str = titles if texts is None else texts
return super().__call__(
__A,__A,padding=__A,truncation=__A,max_length=__A,return_tensors=__A,return_attention_mask=__A,**__A,)
_lowerCamelCase : int = titles if not isinstance(__A,__A ) else [titles]
_lowerCamelCase : List[Any] = texts if not isinstance(__A,__A ) else [texts]
_lowerCamelCase : Tuple = len(__A )
_lowerCamelCase : str = questions if not isinstance(__A,__A ) else [questions] * n_passages
assert len(__A ) == len(
            __A ), f'There should be as many titles as texts but got {len(__A )} titles and {len(__A )} texts.'
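        # Encode each "<question> [SEP] <title>" pair with special tokens, then encode the
        # bare passage texts (no special tokens in the original DPR code) so the two
        # id sequences can be concatenated below.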
_lowerCamelCase : int = super().__call__(__A,__A,padding=__A,truncation=__A )["input_ids"]
_lowerCamelCase : int = super().__call__(__A,add_special_tokens=__A,padding=__A,truncation=__A )["input_ids"]
_lowerCamelCase : Optional[int] = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__A,__A )
]
}
if return_attention_mask is not False:
_lowerCamelCase : Dict = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCamelCase : int = attention_mask
return self.pad(__A,padding=__A,max_length=__A,return_tensors=__A )
def lowerCamelCase_ ( self : Optional[int],__A : BatchEncoding,__A : DPRReaderOutput,__A : int = 1_6,__A : int = 6_4,__A : int = 4,):
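        # Rank passages by their relevance logit, then pick the top non-overlapping
        # answer spans inside each passage until num_spans predictions are collected.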
_lowerCamelCase : Optional[Any] = reader_input["input_ids"]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = reader_output[:3]
_lowerCamelCase : str = len(__A )
_lowerCamelCase : List[str] = sorted(range(__A ),reverse=__A,key=relevance_logits.__getitem__ )
_lowerCamelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCamelCase : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCamelCase : str = sequence_ids.index(self.sep_token_id,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCamelCase : Optional[int] = sequence_ids.index(self.pad_token_id )
else:
_lowerCamelCase : Optional[int] = len(__A )
_lowerCamelCase : Dict = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len],end_logits=end_logits[doc_id][passage_offset:sequence_len],max_answer_length=__A,top_spans=__A,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],relevance_score=relevance_logits[doc_id],doc_id=__A,start_index=__A,end_index=__A,text=self.decode(sequence_ids[start_index : end_index + 1] ),) )
if len(__A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : List[int],__A : int,__A : int,):
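        # Brute-force span search: every (start, end) pair within max_answer_length
        # is scored as start_logit + end_logit before filtering overlapping spans.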
_lowerCamelCase : int = []
for start_index, start_score in enumerate(__A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCamelCase : List[str] = sorted(__A,key=lambda __A : x[1],reverse=__A )
_lowerCamelCase : List[Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
_lowerCamelCase : Optional[Any] = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A )
class UpperCAmelCase__ ( A , A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = DPRReaderTokenizer | 11 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : Any,*__A : Any,**__A : int ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead.",__A,)
super().__init__(*__A,**__A ) | 11 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
) | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
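# Shape sketch (hypothetical call, using the name this helper goes by at its call site):
# key, tensor = rename_base_flax_keys(("mlp", "wi", "kernel"), torch.zeros(8, 512, 1024))
# -> key == ("mlp", "wi", "weight"), tensor.shape == (8, 1024, 512): 3-D expert kernels are
# permuted, 2-D linear kernels are transposed, and "scale"/"embedding" are only renamed.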
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
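# Key-layout sketch (assumed tensorstore spec layout): flattened checkpoint keys such as
#   ".../kernel/metadata/chunks"  are regrouped under the ".../kernel" prefix,
#   ".../kernel/kvstore/path"     is rewritten to point inside switch_checkpoint_path,
#   ".../kernel/kvstore/driver"   is forced to the local "file" driver,
# so every real layer ends up with a complete, locally readable tensorstore spec.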
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
        # If this weight is going to tip the shard over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
        _lowerCamelCase : Optional[int] = raw_weights.to(getattr(torch , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
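# Index sketch: when more than one shard is written, the returned index follows the usual
# transformers layout (illustrative keys):
#   {"metadata": {"total_size": <bytes>},
#    "weight_map": {"encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-000NN.bin", ...}}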
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : int,__A : Union[str, Any]=3,__A : Optional[Any]=3_2,__A : int=3,__A : Optional[int]=1_0,__A : str=[8, 1_6, 3_2, 6_4],__A : Optional[Any]=[1, 1, 2, 1],__A : Any=True,__A : str=True,__A : Union[str, Any]="relu",__A : List[Any]=3,__A : Union[str, Any]=None,__A : Union[str, Any]=["stage2", "stage3", "stage4"],__A : Tuple=[2, 3, 4],__A : Any=1,):
_lowerCamelCase : Dict = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Optional[Any] = embeddings_size
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : str = scope
_lowerCamelCase : Tuple = len(__A )
_lowerCamelCase : Optional[int] = out_features
_lowerCamelCase : Any = out_indices
_lowerCamelCase : Union[str, Any] = num_groups
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Tuple = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size],self.num_labels )
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : List[Any] ):
return BitConfig(
num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,out_features=self.out_features,out_indices=self.out_indices,num_groups=self.num_groups,)
def lowerCamelCase_ ( self : List[Any],__A : str,__A : List[str],__A : Tuple ):
_lowerCamelCase : Dict = BitModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Tuple = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),)
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any],__A : Dict,__A : Union[str, Any] ):
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : Any = BitForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Tuple,__A : List[str],__A : Union[str, Any],__A : Optional[Any] ):
_lowerCamelCase : str = BitBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Dict = BitBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Tuple = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = config_and_inputs
_lowerCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCAmelCase_ = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = BitModelTester(self )
        _lowerCamelCase : int = ConfigTester(self,config_class=BitConfig,has_text_modality=False )
def lowerCamelCase_ ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : int ):
return
@unittest.skip(reason="Bit does not output attentions" )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__A )
_lowerCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, module in model.named_modules():
if isinstance(__A,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ),msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
self.assertTrue(
torch.all(module.bias == 0 ),msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
def lowerCamelCase_ ( self : Dict ):
def check_hidden_states_output(__A : Dict,__A : Union[str, Any],__A : Optional[Any] ):
_lowerCamelCase : str = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : str = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__A ),expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],)
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : str = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCamelCase : Tuple = layer_type
_lowerCamelCase : Tuple = True
check_hidden_states_output(__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Union[str, Any] = True
check_hidden_states_output(__A,__A,__A )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def lowerCamelCase_ ( self : Any ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int = BitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Dict = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : int = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**__A )
# verify the logits
_lowerCamelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : Optional[int] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (BitBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = BitConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Tuple = BitModelTester(self ) | 11 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
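# Counting sketch: for an a x b x M cuboid with a <= b <= M and a + b = s, the shortest
# surface path unfolds to sqrt(M**2 + s**2); whenever that is an integer, the number of
# valid (a, b) splits of s is min(M, s // 2) - max(1, s - M) + 1, which is exactly the
# increment applied above (M = max_cuboid_size, s = sum_shortest_sides).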
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
'''simple docstring'''
import os
import string
import sys
UpperCAmelCase_ : List[Any] = 1 << 8
UpperCAmelCase_ : int = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
UpperCAmelCase_ : List[Any] = KEYMAP['up']
UpperCAmelCase_ : int = KEYMAP['left']
if sys.platform == "win32":
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[str] = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
UpperCAmelCase_ : Dict = ord(str(i))
def A_ ( ):
"""simple docstring"""
if os.name == "nt":
import msvcrt
_lowerCamelCase : List[str] = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
# Read the keystroke
_lowerCamelCase : Any = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : Tuple = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Optional[int] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(_lowerCAmelCase )
if ord(_lowerCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : str = chr(KEYMAP["esc"] )
except KeyError:
_lowerCamelCase : Any = cha[1]
else:
_lowerCamelCase : Dict = ch.decode(_lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : Optional[Any] = sys.stdin.fileno()
_lowerCamelCase : Dict = termios.tcgetattr(_lowerCAmelCase )
try:
tty.setraw(_lowerCAmelCase )
_lowerCamelCase : List[str] = sys.stdin.read(1 )
finally:
termios.tcsetattr(_lowerCAmelCase , termios.TCSADRAIN , _lowerCAmelCase )
return ch
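# Encoding sketch: on Windows, arrow keys arrive as two-byte sequences prefixed with
# b"\x00" or b"\xe0"; WIN_KEYMAP maps them to base codes, and ARROW_KEY_FLAG (1 << 8)
# is added back by get_character() below so arrow codes can never collide with plain
# single-byte characters.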
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = get_raw_chars()
if ord(_lowerCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_lowerCAmelCase ) == KEYMAP["esc"]:
_lowerCamelCase : str = get_raw_chars()
if ord(_lowerCAmelCase ) == KEYMAP["mod_int"]:
_lowerCamelCase : Dict = get_raw_chars()
if ord(_lowerCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_lowerCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_lowerCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 11 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
    if isinstance(_lowerCAmelCase , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(_lowerCAmelCase , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 1 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase__ ( A ):
@require_torch
def lowerCamelCase_ ( self : int ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and that is too late once we are inside pytest - so we
        # change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCamelCase : Optional[int] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_lowerCamelCase : Optional[int] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_lowerCamelCase : List[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_lowerCamelCase : Optional[int] = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__A )
BertModel.from_pretrained(__A )
BertTokenizer.from_pretrained(__A )
pipeline(task="fill-mask",model=__A )
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_lowerCamelCase : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase : int = "1"
_lowerCamelCase : Any = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCamelCase : Any = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_lowerCamelCase : Union[str, Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_lowerCamelCase : Any = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_lowerCamelCase : Dict = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__A )
BertModel.from_pretrained(__A )
BertTokenizer.from_pretrained(__A )
pipeline(task="fill-mask",model=__A )
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Union[str, Any] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_lowerCamelCase : int = self.get_env()
_lowerCamelCase : Optional[Any] = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : Tuple ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and that is too late once we are inside pytest - so we
        # change it while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCamelCase : List[str] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
_lowerCamelCase : List[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
_lowerCamelCase : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Optional[int] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_lowerCamelCase : Dict = self.get_env()
_lowerCamelCase : Any = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
# next emulate no network
_lowerCamelCase : str = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase : List[Any] = "1"
_lowerCamelCase : int = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = "\nfrom transformers import pipeline\n "
_lowerCamelCase : List[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
_lowerCamelCase : str = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
_lowerCamelCase : Union[str, Any] = self.get_env()
_lowerCamelCase : Any = "1"
_lowerCamelCase : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
_lowerCamelCase : Tuple = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,1,result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode",result.stderr.decode().replace("\n","" ),)
@require_torch
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Dict = "\nfrom transformers import AutoModel\n "
_lowerCamelCase : List[Any] = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Tuple = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_lowerCamelCase : Optional[Any] = self.get_env()
_lowerCamelCase : Tuple = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase : str = "1"
_lowerCamelCase : int = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() ) | 11 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
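    # Scheduler sketch: freshly drawn latents are multiplied by scheduler.init_noise_sigma
    # so the starting sample carries the variance the first denoising timestep expects.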
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
        if isinstance(__A,list ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
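    # Batch-layout sketch: under classifier-free guidance the prior sees a doubled batch,
    # rows [0, B) holding the zeroed (unconditional) embeds and rows [B, 2B) the real ones;
    # __call__ later splits the two halves apart again with noise_pred.chunk(2).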
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
        elif isinstance(__A,list ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported, not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 1 |
'''simple docstring'''
from math import factorial
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : int,__A : List[Any] ):
_lowerCamelCase : Dict = real
        if isinstance(__A,int ):
_lowerCamelCase : str = [1] * rank
else:
_lowerCamelCase : Union[str, Any] = rank
def __repr__( self : Union[str, Any] ):
return (
f'{self.real}+'
f'{"+".join(str(__A )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real,__A )
def __add__( self : int,__A : str ):
        if not isinstance(__A,Dual ):
return Dual(self.real + other,self.duals )
_lowerCamelCase : int = self.duals.copy()
_lowerCamelCase : Tuple = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
_lowerCamelCase : Tuple = []
        for i in range(len(s_dual ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real,__A )
lowerCAmelCase_ = __add__
def __sub__( self : Dict,__A : int ):
return self + other * -1
def __mul__( self : int,__A : Optional[int] ):
        if not isinstance(__A,Dual ):
_lowerCamelCase : str = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other,__A )
_lowerCamelCase : Any = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real,__A )
lowerCAmelCase_ = __mul__
def __truediv__( self : List[str],__A : List[Any] ):
        if not isinstance(__A,Dual ):
_lowerCamelCase : Optional[int] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other,__A )
raise ValueError
def __floordiv__( self : List[str],__A : Any ):
        if not isinstance(__A,Dual ):
_lowerCamelCase : Dict = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other,__A )
raise ValueError
def __pow__( self : Optional[Any],__A : Optional[Any] ):
        if n < 0 or isinstance(n,float ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
_lowerCamelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
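# Quick sanity sketch: Dual(2, 1) models 2 + E with higher powers of E tracked as extra
# dual terms; squaring it gives real == 4 and duals[0] == 4, matching (2 + E)**2 = 4 + 4E + E**2.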
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Any ):
"""simple docstring"""
if not callable(_lowerCAmelCase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(_lowerCAmelCase , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(_lowerCAmelCase , int ):
raise ValueError("differentiate() requires an int as input for order" )
_lowerCamelCase : Optional[Any] = Dual(_lowerCAmelCase , 1 )
_lowerCamelCase : List[str] = func(_lowerCAmelCase )
if order == 0:
return result.real
    return result.duals[order - 1] * factorial(order )
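# Worked check sketch (with the intended, de-obfuscated signature): for g(y) = y**3,
# differentiate(g, 2, 2) evaluates to 12: the order-2 dual coefficient is 6 and is
# scaled by factorial(2), matching g''(2) = 6 * 2.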
if __name__ == "__main__":
import doctest
doctest.testmod()
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2)) | 11 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
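# Note: the loop above swaps two uniformly random positions per pass, which is not the
# classic unbiased Fisher-Yates permutation. A minimal reference sketch of the standard
# in-place variant (new helper name, not part of the original module):
def fisher_yates_shuffle_unbiased(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        # swap position i with a uniformly chosen partner j in [0, i]
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data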
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 1 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
UpperCAmelCase_ : Tuple = data_utils.TransfoXLTokenizer
UpperCAmelCase_ : Any = data_utils.TransfoXLCorpus
UpperCAmelCase_ : Union[str, Any] = data_utils
UpperCAmelCase_ : int = data_utils
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] ):
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_lowerCAmelCase , "rb" ) as fp:
_lowerCamelCase : List[Any] = pickle.load(_lowerCAmelCase , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
_lowerCamelCase : Any = corpus.vocab.__dict__
torch.save(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , _lowerCAmelCase )
_lowerCamelCase : Tuple = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(_lowerCAmelCase , _lowerCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_lowerCamelCase : Union[str, Any] = os.path.abspath(_lowerCAmelCase )
_lowerCamelCase : Any = os.path.abspath(_lowerCAmelCase )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_lowerCamelCase : List[Any] = TransfoXLConfig()
else:
_lowerCamelCase : List[str] = TransfoXLConfig.from_json_file(_lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
_lowerCamelCase : List[str] = TransfoXLLMHeadModel(_lowerCAmelCase )
_lowerCamelCase : List[str] = load_tf_weights_in_transfo_xl(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
print(F'Save PyTorch model to {os.path.abspath(_lowerCAmelCase )}' )
torch.save(model.state_dict() , _lowerCAmelCase )
print(F'Save configuration file to {os.path.abspath(_lowerCAmelCase )}' )
with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
UpperCAmelCase_ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
            _lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=True )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
UpperCAmelCase_ : Tuple = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'tapas'
def __init__( self : str,__A : Tuple=3_0_5_2_2,__A : Any=7_6_8,__A : Dict=1_2,__A : Optional[int]=1_2,__A : List[Any]=3_0_7_2,__A : Any="gelu",__A : List[str]=0.1,__A : List[str]=0.1,__A : Union[str, Any]=1_0_2_4,__A : List[Any]=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0],__A : str=0.02,__A : Dict=1e-12,__A : Dict=0,__A : str=10.0,__A : Dict=0,__A : Optional[Any]=1.0,__A : List[str]=None,__A : Optional[int]=1.0,__A : Optional[Any]=False,__A : Any=None,__A : List[str]=1.0,__A : Dict=1.0,__A : List[str]=False,__A : int=False,__A : List[Any]="ratio",__A : Optional[int]=None,__A : List[str]=None,__A : Union[str, Any]=6_4,__A : str=3_2,__A : List[str]=False,__A : int=True,__A : Dict=False,__A : str=False,__A : Optional[Any]=True,__A : str=False,__A : str=None,__A : Optional[Any]=None,**__A : Any,):
super().__init__(pad_token_id=__A,**__A )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : Optional[int] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_sizes
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Tuple = layer_norm_eps
# Fine-tuning task hyperparameters
_lowerCamelCase : Optional[int] = positive_label_weight
_lowerCamelCase : Dict = num_aggregation_labels
_lowerCamelCase : str = aggregation_loss_weight
_lowerCamelCase : List[str] = use_answer_as_supervision
_lowerCamelCase : Optional[int] = answer_loss_importance
_lowerCamelCase : Union[str, Any] = use_normalized_answer_loss
_lowerCamelCase : str = huber_loss_delta
_lowerCamelCase : int = temperature
_lowerCamelCase : str = aggregation_temperature
_lowerCamelCase : Optional[int] = use_gumbel_for_cells
_lowerCamelCase : Dict = use_gumbel_for_aggregation
_lowerCamelCase : int = average_approximation_function
_lowerCamelCase : str = cell_selection_preference
_lowerCamelCase : Tuple = answer_loss_cutoff
_lowerCamelCase : Tuple = max_num_rows
_lowerCamelCase : Any = max_num_columns
_lowerCamelCase : Optional[Any] = average_logits_per_cell
_lowerCamelCase : Union[str, Any] = select_one_column
_lowerCamelCase : List[Any] = allow_empty_column_selection
_lowerCamelCase : Optional[int] = init_cell_selection_weights_to_zero
_lowerCamelCase : Dict = reset_position_index_per_cell
_lowerCamelCase : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
_lowerCamelCase : str = aggregation_labels
_lowerCamelCase : Optional[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels,__A ):
_lowerCamelCase : Tuple = {int(__A ): v for k, v in aggregation_labels.items()} | 11 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
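    # Intended token layout sketch: a single sequence is wrapped as <bos> A <eos>; a pair
    # becomes <bos> A <eos> <eos> B <eos>, mirroring the byte-level BPE convention above.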
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 1 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = JukeboxTokenizer
lowerCAmelCase_ = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def lowerCamelCase_ ( self : Optional[Any] ):
import torch
_lowerCamelCase : Tuple = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
_lowerCamelCase : Optional[int] = tokenizer(**self.metas )["input_ids"]
# fmt: off
_lowerCamelCase : List[str] = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
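# The golden-output checks above are ordinary `unittest` cases; a sketch for running
# them directly (not part of the original file; the `from_pretrained` calls download
# the tokenizer files on first use):
if __name__ == "__main__":
    unittest.main()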
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed `random`, NumPy and PyTorch for reproducible behavior."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
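# A usage sketch (not part of the original module): calling `set_seed` once at program
# start makes `random`, NumPy and PyTorch (CPU and every CUDA device) reproducible
# together, e.g.:
#
#     set_seed(42)
#     noise = torch.randn(4, 4)  # identical across runs that use the same seed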
class ExponentialMovingAverage:
    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "ExponentialMovingAverage":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model
    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)
    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)
    def to(self, device=None, dtype=None) -> None:
        # .to() on the tensors handles None correctly
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        # deepcopy, to be consistent with the torch.nn.Module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
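# A minimal training-loop sketch (not part of the original module). The linear model,
# SGD optimizer and squared-error loss are placeholder choices used only to show the
# step()/store()/copy_to()/restore() pattern.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    ema = ExponentialMovingAverage(model.parameters(), decay=0.999)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
    for _ in range(100):
        optimizer.zero_grad()
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        optimizer.step()
        ema.step(model.parameters())  # update shadow weights after each optimizer step
    # Evaluate with the averaged weights, then put the raw training weights back.
    ema.store(model.parameters())
    ema.copy_to(model.parameters())
    # ... run evaluation here ...
    ema.restore(model.parameters())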