code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
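# A hedged usage sketch for maximum_non_adjacent_sum; the inputs below are
# illustrative, not taken from the original file:
#
#     >>> maximum_non_adjacent_sum([1, 2, 4, 7, 3])   # picks 2 + 7
#     9
#     >>> maximum_non_adjacent_sum([])
#     0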
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
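# Hedged invocation sketch; the runner names and the token are placeholders,
# not values from the original script:
#
#     python get_runner_status.py \
#         --target_runners single-gpu-runner,multi-gpu-runner \
#         --token <github-token-with-actions:read-permission>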
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
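# Hedged usage sketch of the tokenizer above, assuming the standard
# `transformers` entry point; the French sentence is illustrative:
#
#     from transformers import BarthezTokenizer
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Bonjour tout le monde.")["input_ids"]
#     print(tokenizer.decode(ids))  # <s> ... </s> wraps the sentence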
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg: str, hint: str = None) -> None:
    require_version(deps[pkg], hint)
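# Hedged usage sketch of the helpers above; the requirement and hint strings
# are illustrative, not entries from dependency_versions_table.py:
#
#     require_version("tokenizers>=0.11.1", "pip install tokenizers")  # raises if the installed version is out of range
#     dep_version_check("numpy")  # checks the pinned range for a package listed in `deps`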
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a sieve:
    phi[i] starts at i - 1 and every prime i discounts its multiples."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
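# A small hedged sanity check (values worked out by hand, not part of the
# original file): phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4, which sums to 31.
#
#     assert solution(10) == 31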
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1_024,
'facebook/mbart-large-cc25': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
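# Hedged usage sketch of the language-code handling above; the model id and
# sentence are illustrative:
#
#     from transformers import MBartTokenizerFast
#     tok = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tok("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
#     # each encoded sequence now ends with </s> followed by the en_XX code,
#     # per set_src_lang_special_tokens above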
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # NOTE: the obfuscated source hid the target key names; the DistilBERT
    # parameter names below follow the reference extract_distilbert.py script.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image; `level` ranges from -255 to 255."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental contrast transform applied to each channel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
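# Hedged usage sketch that avoids the repo-specific lena.jpg asset; the
# synthetic gradient image below is illustrative:
#
#     from PIL import Image
#     gradient = Image.new("L", (64, 64))
#     gradient.putdata([x for _ in range(64) for x in range(0, 256, 4)])
#     change_contrast(gradient, 170).save("high_contrast.png", format="png")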
"""simple docstring"""
class TrieNode:
    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word, creating nodes along the way as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word is stored in the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word, pruning nodes that become unreachable."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
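# Hedged usage sketch beyond the built-in tests; the words are illustrative:
#
#     root = TrieNode()
#     root.insert_many(["car", "card"])
#     assert root.find("card") and not root.find("ca")
#     root.delete("card")  # prunes only the branch that is unique to "card"
#     assert root.find("car")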
"""simple docstring"""
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed-priority FIFO queues; priority 0 is served first."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """A queue that always dequeues the smallest stored element."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
"""simple docstring"""
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Run ``check_program`` in a separate process and report whether it passed."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir


@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it is read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)


def reliability_guard(maximum_memory_bytes=None):
    """
    Disables destructive functions so the executed program cannot interfere
    with the test. The attribute names below follow the canonical
    human-eval / `evaluate` code_eval implementation that this file mirrors.
    This is NOT a security sandbox; untrusted code should still be run in a
    proper container.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
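# Hedged usage sketch of check_correctness; the program string and ids are
# illustrative, not from the original file:
#
#     res = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
#     print(res["passed"], res["result"])  # -> True "passed" when the program runs cleanly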
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
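# Hedged invocation sketch; the checkpoint, vocab and output paths are
# placeholders, not paths from the original script:
#
#     python convert_speecht5_checkpoint.py \
#         --task t2s \
#         --checkpoint_path ./speecht5_tts.pt \
#         --vocab_path ./spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_tts_hf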
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
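
# Illustrative usage sketch (added for orientation; the pipeline class name and
# checkpoint id below are assumptions, not verified against this file):
#
#     pipe = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#     out = pipe(image_embeds=emb, image=init_image, negative_image_embeds=neg_emb,
#                strength=0.3, num_inference_steps=100)
#     out.images[0].save("result.png")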
| 663 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=DummyObject ):
    _backends = ["speech"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""speech"""] )
class __A ( metaclass=DummyObject ):
    _backends = ["speech"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""speech"""] )
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    # Neville's iterated interpolation: evaluate the interpolating polynomial
    # through (x_points, y_points) at the abscissa xa, returning the value and
    # the full table of intermediate estimates.
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
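
# Minimal sanity check (added): the points below all lie on the line y = x + 5,
# so interpolating anywhere should reproduce the line exactly.
if __name__ == "__main__":
    value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    print(value)  # expected: 10.0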
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( TokenizerTesterMixin , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
    def setUp( self ):
super().setUp()
        vocab_tokens = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization( self ):
        tokenizer = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
        texts = """今天天气真好!"""
        jieba_tokens = ["""今天""", """天气""", """真""", """好""", """!"""]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = """今天天气真好!"""
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text , normalized_text )
| 663 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
_a : Any = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=400 , a__=2000 , a__=1 , a__=0.0 , a__=16000 , a__=True , a__=80 , a__=16 , a__=64 , a__="hann_window" , a__=80 , a__=7600 , a__=1e-10 , a__=True , ):
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Union[str, Any] = min_seq_length
_lowerCAmelCase : Any = max_seq_length
_lowerCAmelCase : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCAmelCase : Optional[Any] = feature_size
_lowerCAmelCase : int = padding_value
_lowerCAmelCase : List[Any] = sampling_rate
_lowerCAmelCase : Tuple = do_normalize
_lowerCAmelCase : Dict = num_mel_bins
_lowerCAmelCase : Dict = hop_length
_lowerCAmelCase : Any = win_length
_lowerCAmelCase : Optional[int] = win_function
_lowerCAmelCase : List[Any] = fmin
_lowerCAmelCase : List[str] = fmax
_lowerCAmelCase : List[str] = mel_floor
_lowerCAmelCase : Any = return_attention_mask
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __A ( self , a__=False , a__=False ):
def _flatten(a__ ):
return list(itertools.chain(*a__ ) )
if equal_length:
_lowerCAmelCase : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCAmelCase : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : List[str] = [np.asarray(a__ ) for x in speech_inputs]
return speech_inputs
def __A ( self , a__=False , a__=False ):
if equal_length:
_lowerCAmelCase : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCAmelCase : List[str] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : Union[str, Any] = [np.asarray(a__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __A ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
_UpperCamelCase : Optional[int] = SpeechTaFeatureExtractor
def __A ( self ):
_lowerCAmelCase : Any = SpeechTaFeatureExtractionTester(self )
def __A ( self , a__ ):
self.assertTrue(np.all(np.mean(a__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a__ , axis=0 ) - 1 ) < 1e-3 ) )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : List[Any] = [np.asarray(a__ ) for speech_input in speech_inputs]
# Test not batched input
_lowerCAmelCase : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_lowerCAmelCase : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
# Test batched
_lowerCAmelCase : Union[str, Any] = feat_extract(a__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : Tuple = feat_extract(a__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
def __A ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Any = ["""longest""", """max_length""", """do_not_pad"""]
_lowerCAmelCase : str = [None, 1600, None]
for max_length, padding in zip(a__ , a__ ):
_lowerCAmelCase : List[Any] = feat_extract(a__ , padding=a__ , max_length=a__ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __A ( self ):
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Union[str, Any] = range(800 , 1400 , 200 )
_lowerCAmelCase : int = [floats_list((1, x) )[0] for x in lengths]
_lowerCAmelCase : int = ["""longest""", """max_length""", """do_not_pad"""]
_lowerCAmelCase : Optional[int] = [None, 1600, None]
for max_length, padding in zip(a__ , a__ ):
_lowerCAmelCase : Any = feat_extract(a__ , max_length=a__ , padding=a__ )
_lowerCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __A ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : int = feat_extract(
a__ , truncation=a__ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
_lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __A ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Optional[Any] = feat_extract(
a__ , truncation=a__ , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : List[Any] = feat_extract(
a__ , truncation=a__ , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __A ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Optional[int] = np.random.rand(100 ).astype(np.floataa )
_lowerCAmelCase : Union[str, Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCAmelCase : List[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Tuple = [np.asarray(a__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCAmelCase : List[str] = feature_extractor(audio_target=a__ , padding=a__ , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
_lowerCAmelCase : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
_lowerCAmelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
# Test batched
_lowerCAmelCase : List[Any] = feature_extractor(a__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : Any = feature_extractor(a__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCAmelCase : str = np.asarray(a__ )
_lowerCAmelCase : List[Any] = feature_extractor(a__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : List[Any] = feature_extractor(a__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
def __A ( self ):
_lowerCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0]
_lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a__ ) == len(a__ ) for x, y in zip(a__ , processed_features[input_name] ) ) )
_lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a__ )
_lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_lowerCAmelCase : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __A ( self ):
_lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a__ )
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Tuple = feat_extract.model_input_names[0]
_lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_lowerCAmelCase : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase : Union[str, Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __A ( self ):
_lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : str = feat_extract.model_input_names[0]
_lowerCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : Any = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : Optional[int] = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase : str = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __A ( self ):
_lowerCAmelCase : List[str] = self.feat_extract_dict
_lowerCAmelCase : Dict = True
_lowerCAmelCase : str = self.feature_extraction_class(**a__ )
_lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : Optional[int] = [len(a__ ) for x in speech_inputs]
_lowerCAmelCase : Tuple = feat_extract.model_input_names[0]
_lowerCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : str = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : Tuple = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , a__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.feat_extract_dict
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Dict = self.feature_extraction_class(**a__ )
_lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : int = [len(a__ ) for x in speech_inputs]
_lowerCAmelCase : Optional[int] = feat_extract.model_input_names[0]
_lowerCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : List[Any] = min(a__ )
_lowerCAmelCase : Optional[int] = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : Tuple = feat_extract.pad(
a__ , padding="""max_length""" , max_length=a__ , truncation=a__ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , a__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __A ( self , a__ ):
from datasets import load_dataset
_lowerCAmelCase : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_lowerCAmelCase : Union[str, Any] = ds.sort("""id""" ).select(range(a__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __A ( self ):
# fmt: off
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
_lowerCAmelCase : Tuple = self._load_datasamples(1 )
_lowerCAmelCase : int = SpeechTaFeatureExtractor()
_lowerCAmelCase : int = feature_extractor(a__ , return_tensors="""pt""" ).input_values
        self.assertEqual(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , a__ , atol=1e-6 ) )
def __A ( self ):
# fmt: off
_lowerCAmelCase : str = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
_lowerCAmelCase : Optional[int] = self._load_datasamples(1 )
_lowerCAmelCase : Optional[Any] = SpeechTaFeatureExtractor()
_lowerCAmelCase : Tuple = feature_extractor(audio_target=a__ , return_tensors="""pt""" ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a__ , atol=1e-4 ) )
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( TokenizerTesterMixin , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
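
# Note (added): `truncate_before_pattern` in the decode test above cuts the
# decoded string at the first regex match (here: comment markers, the
# <|endoftext|> token, docstring openers, or a run of blank lines), so code
# completions stop at a natural boundary instead of rambling into the next
# definition.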
| 663 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=400 , a__=True , a__=None , a__=True , ):
_lowerCAmelCase : List[Any] = size if size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : Union[str, Any] = min_resolution
_lowerCAmelCase : str = max_resolution
_lowerCAmelCase : Optional[Any] = do_resize
_lowerCAmelCase : Tuple = size
_lowerCAmelCase : Optional[Any] = do_normalize
def __A ( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __A ( ImageProcessingSavingTestMixin , unittest.TestCase ):
_UpperCamelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def __A ( self ):
_lowerCAmelCase : Any = ImageGPTImageProcessingTester(self )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """clusters""" ) )
self.assertTrue(hasattr(a__ , """do_resize""" ) )
self.assertTrue(hasattr(a__ , """size""" ) )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
def __A ( self ):
_lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __A ( self ):
_lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
_lowerCAmelCase : List[str] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , obj[key] ) )
else:
self.assertEqual(obj[key] , a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Union[str, Any] = os.path.join(a__ , """image_processor.json""" )
image_processor_first.to_json_file(a__ )
_lowerCAmelCase : str = self.image_processing_class.from_json_file(a__ ).to_dict()
_lowerCAmelCase : Tuple = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , a__ )
def __A ( self ):
_lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(a__ )
_lowerCAmelCase : str = self.image_processing_class.from_pretrained(a__ ).to_dict()
_lowerCAmelCase : str = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , a__ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __A ( self ):
pass
def prepare_images() -> list:
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
    image_a = Image.open(dataset[4]["""file"""] )
    image_b = Image.open(dataset[5]["""file"""] )
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class __A ( unittest.TestCase ):
@slow
    def test_image( self ):
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
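
# Note (added): with the _LazyModule registered in sys.modules, an import such
# as `from transformers.models.altclip import AltCLIPModel` resolves the name
# lazily, so the torch-backed submodule is only loaded on first access.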
| 663 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
    def test_inference( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 708 |
"""simple docstring"""
def perfect(number: int) -> bool:
    # A perfect number equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 0 |
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    # Mutates `grid` in place so that each cell holds the cheapest cost of
    # reaching it from the top-left corner, then returns the bottom-right cost.
    if not grid or not grid[0]:
        raise TypeError("""The grid does not contain the appropriate information""" )
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
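
# Illustrative example (added): in the classic 3x3 grid below the cheapest
# top-left to bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, for a total of 7.
if __name__ == "__main__":
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # expected: 7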
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree


def preorder(root: Node | None ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


def postorder(root: Node | None ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


def inorder(root: Node | None ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


def height(root: Node | None ) -> int:
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0


def level_order(root: Node | None ) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output


def get_nodes_from_left_to_right(root: Node | None , level: int ) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output


def get_nodes_from_right_to_left(root: Node | None , level: int ) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output


def zigzag(root: Node | None ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root )}" )
    print(f"Pre-order Traversal: {preorder(root )}" )
    print(f"Post-order Traversal: {postorder(root )}" , """\n""" )
    print(f"Height of Tree: {height(root )}" , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(root ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(root ) + 1 ):
        print(f"Level {level}:" , get_nodes_from_left_to_right(root , level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
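
# Shape of the sample tree built by make_tree (added for reference):
#
#         1
#        / \
#       2   3
#      / \
#     4   5
#
# giving inorder [4, 2, 5, 1, 3], preorder [1, 2, 4, 5, 3] and the zigzag
# traversal [[1], [3, 2], [4, 5]].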
| 663 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor( ProcessorMixin ):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                F"Loading a tokenizer inside {cls.__name__} from a config that does not"
                """ include a `tokenizer_class` attribute is deprecated and will be """
                """removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"""
                """ attribute to either your `config.json` or `tokenizer_config.json` """
                """file to suppress this warning: """ , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )

    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs

    def pad( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("""input_features""" , None )
        labels = kwargs.pop("""labels""" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
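
# Illustrative usage (added; the checkpoint id is an assumption): audio and its
# transcription can be processed in a single call, with the feature extractor
# producing `input_values` and the tokenizer producing `labels`:
#
#     processor = WavaVecaProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#     batch = processor(audio=raw_audio, sampling_rate=16_000, text="HELLO WORLD")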
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
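
# Note (added): accelerator.prepare wraps the user optimizer in an accelerate
# optimizer wrapper, so this test guards against the wrapper introducing
# unpicklable state, which would break checkpointing code that pickles the
# optimizer directly.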
| 663 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float) -> np.ndarray:
    # Forward (explicit) Euler method: y[k + 1] = y[k] + h * f(x[k], y[k]).
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
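
# Illustrative run (added): for y' = y with y(0) = 1, one hundred Euler steps
# of size 0.01 should land near e ~ 2.71828; explicit Euler undershoots
# slightly, giving (1.01 ** 100) ~ 2.7048.
if __name__ == "__main__":
    approximation = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(approximation[-1])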
| 711 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
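# The graph above implements plain Lloyd's-algorithm k-means. For reference,
# here is a minimal NumPy sketch of the same expectation/maximization loop
# (an added illustration; the TF1 code above needs the long-deprecated
# tf.Session / tf.placeholder / tf.sub APIs to actually run):
import numpy as np

def kmeans_numpy_sketch(vectors, noofclusters, noofiterations=100, seed=0):
    rng = np.random.default_rng(seed)
    vectors = np.asarray(vectors, dtype=float)
    # pick initial centroids from the data, like the shuffled indices above
    centroids = vectors[rng.choice(len(vectors), size=noofclusters, replace=False)]
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(noofiterations):
        # EXPECTATION: assign every vector to its nearest centroid
        dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # MAXIMIZATION: move each centroid to the mean of its members
        for c in range(noofclusters):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments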
| 663 | 0 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
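# Added usage sketch for the `truncate_before_pattern` behaviour exercised in
# the slow test above (downloads the Salesforce/codegen-350M-mono vocab):
if __name__ == "__main__":
    demo_tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    demo_ids = demo_tokenizer.encode("\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#")
    # decode() drops everything from the first match of any pattern onwards
    print(demo_tokenizer.decode(demo_ids, truncate_before_pattern=["^#", re.escape("<|endoftext|>"), "\n\n\n"]))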
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
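# How a notebook builder is expected to consume these tables (an illustrative
# sketch; the loop and variable names below are not part of this config):
#
#   for placeholder, replacement in {"{processor_class}": "FakeProcessorClass"}.items():
#       cell_source = cell_source.replace(placeholder, replacement)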
| 663 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : Optional[Any] = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
_lowerCAmelCase : List[Any] = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> str:
_lowerCAmelCase : int = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> List[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : List[str] ) -> str:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : int = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" )
_lowerCAmelCase : List[Any] = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" )
# next, set bias in the state dict
_lowerCAmelCase : Any = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase ,requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : Optional[Any] = qkv_bias
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> List[str]:
_lowerCAmelCase : int = 364 if """coco""" in model_name else 224
_lowerCAmelCase : int = InstructBlipVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xl""" ,dense_act_fn="""gelu""" ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : Optional[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" ,dense_act_fn="""gelu""" ,bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_lowerCAmelCase : Dict = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" ,vocab_size=32001 ).to_dict()
elif "vicuna-13b" in model_name:
_lowerCAmelCase : Tuple = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" ,vocab_size=32001 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_lowerCAmelCase : List[Any] = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
_lowerCAmelCase : Optional[Any] = InstructBlipConfig(vision_config=_lowerCamelCase ,text_config=_lowerCamelCase ,qformer_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : List[Any]=None ,_lowerCamelCase : Any=False ) -> Any:
_lowerCAmelCase : int = AutoTokenizer.from_pretrained("""bert-base-uncased""" ,truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
_lowerCAmelCase : str = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" ,truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_lowerCAmelCase : Any = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" ,truncation_side="""left""" ,bos_token="""</s>""" ,unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
_lowerCAmelCase : Dict = get_blipa_config(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = InstructBlipForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : List[Any] = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
_lowerCAmelCase : Union[str, Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Union[str, Any] = """cuda:1""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase : Optional[int] = """cuda:2""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase : Tuple = load_model_and_preprocess(
name=_lowerCamelCase ,model_type=_lowerCamelCase ,is_eval=_lowerCamelCase ,device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[str] = original_model.state_dict()
_lowerCAmelCase : Any = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : int = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : Tuple = key.replace("""Qformer.bert""" ,"""qformer""" )
if "attention.self" in key:
_lowerCAmelCase : int = key.replace("""self""" ,"""attention""" )
if "llm_proj" in key:
_lowerCAmelCase : Dict = key.replace("""llm_proj""" ,"""language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Dict = key.replace("""t5_proj""" ,"""language_projection""" )
if key.startswith("""llm_model""" ):
_lowerCAmelCase : str = key.replace("""llm_model""" ,"""language_model""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : Optional[Any] = key.replace("""t5""" ,"""language""" )
_lowerCAmelCase : Dict = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase ,_lowerCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(_lowerCamelCase ,strict=_lowerCamelCase )
_lowerCAmelCase : int = load_demo_image()
_lowerCAmelCase : Dict = """What is unusual about this image?"""
# create processor
_lowerCAmelCase : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} ,image_mean=_lowerCamelCase ,image_std=_lowerCamelCase )
_lowerCAmelCase : List[str] = InstructBlipProcessor(
image_processor=_lowerCamelCase ,tokenizer=_lowerCamelCase ,qformer_tokenizer=_lowerCamelCase ,)
_lowerCAmelCase : Optional[Any] = processor(images=_lowerCamelCase ,text=_lowerCamelCase ,return_tensors="""pt""" ).to(_lowerCamelCase )
# make sure processor creates exact same pixel values
_lowerCAmelCase : Any = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,_lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
_lowerCAmelCase : Dict = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
_lowerCAmelCase : str = hf_model(**_lowerCamelCase ).logits
else:
_lowerCAmelCase : Dict = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Union[str, Any] = tokenizer("""\n""" ,return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-100 )
_lowerCAmelCase : str = hf_model(**_lowerCamelCase ,labels=_lowerCamelCase ).logits
print("""First values of original logits:""" ,original_logits[0, :3, :3] )
print("""First values of HF logits:""" ,logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_lowerCAmelCase : Optional[Any] = 1e-4 if """vicuna""" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) ,_lowerCamelCase ,atol=_lowerCamelCase )
print("""Looks ok!""" )
print("""Generating with original model...""" )
_lowerCAmelCase : str = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} ,num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
_lowerCAmelCase : int = hf_model.generate(
**_lowerCamelCase ,do_sample=_lowerCamelCase ,num_beams=5 ,max_length=256 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,)
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_lowerCAmelCase : List[str] = 2
print("""Original generation:""" ,_lowerCamelCase )
_lowerCAmelCase : List[str] = processor.batch_decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = [text.strip() for text in output_text]
print("""HF generation:""" ,_lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f"Salesforce/{model_name}" )
hf_model.push_to_hub(f"Salesforce/{model_name}" )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
_a : str = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
_a : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
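# Example invocation (a sketch; the script file name and output path are
# placeholders, and --push_to_hub needs a `huggingface-cli login` first):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl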
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
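# Added sketch of the memory-carrying forward pass the tests above rely on
# (requires TensorFlow and downloads the transfo-xl-wt103 weights):
if __name__ == "__main__":
    demo_model = TFTransfoXLModel.from_pretrained("transfo-xl-wt103")
    chunk_a = tf.constant([[33, 1297, 2, 1]])
    chunk_b = tf.constant([[1009, 4, 1109, 11739]])
    out_a = demo_model(chunk_a)
    # feeding `mems` back in lets the second chunk attend to the first one
    out_b = demo_model(chunk_b, mems=out_a.mems)
    print(len(out_b.mems))  # one memory tensor per transformer layer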
| 663 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : Any = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = "data2vec-vision"
def __init__( self , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=1e-12 , a__=224 , a__=16 , a__=3 , a__=False , a__=False , a__=False , a__=False , a__=0.1 , a__=0.1 , a__=True , a__=[3, 5, 7, 11] , a__=[1, 2, 3, 6] , a__=True , a__=0.4 , a__=256 , a__=1 , a__=False , a__=255 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : Tuple = use_mask_token
_lowerCAmelCase : Tuple = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : Dict = use_shared_relative_position_bias
_lowerCAmelCase : Optional[Any] = layer_scale_init_value
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : Optional[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : str = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : str = use_auxiliary_head
_lowerCAmelCase : Optional[int] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Tuple = auxiliary_num_convs
_lowerCAmelCase : Dict = auxiliary_concat_input
_lowerCAmelCase : Optional[int] = semantic_loss_ignore_index
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = version.parse("1.11" )
@property
def __A ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __A ( self ):
return 1e-4
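# Added usage sketch. Both classes above are renamed to `__A` by this file's
# transformation; upstream they correspond to Data2VecVisionConfig and its
# ONNX counterpart (assumed names), which is what the import below reflects:
if __name__ == "__main__":
    from transformers import Data2VecVisionConfig

    demo_config = Data2VecVisionConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
    print(demo_config.model_type)  # "data2vec-vision"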
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
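# Migration sketch for the deprecation above (the checkpoint id is illustrative):
#
#   from diffusers import StableDiffusionInpaintPipeline
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting"
#   )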
| 663 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : str = logging.get_logger(__name__)
_a : Tuple = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = "pix2struct_text_model"
_UpperCamelCase : Optional[Any] = ["past_key_values"]
_UpperCamelCase : Optional[int] = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=50244 , a__=768 , a__=64 , a__=2048 , a__=12 , a__=12 , a__=32 , a__=128 , a__=0.1 , a__=1e-6 , a__=1.0 , a__="gelu_new" , a__=0 , a__=False , a__=0 , a__=1 , a__=False , a__=True , **a__ , ):
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : str = d_kv
_lowerCAmelCase : Optional[Any] = d_ff
_lowerCAmelCase : Optional[Any] = num_layers
_lowerCAmelCase : Tuple = num_heads
_lowerCAmelCase : Union[str, Any] = relative_attention_num_buckets
_lowerCAmelCase : Union[str, Any] = relative_attention_max_distance
_lowerCAmelCase : str = dropout_rate
_lowerCAmelCase : Optional[int] = layer_norm_epsilon
_lowerCAmelCase : Tuple = initializer_factor
_lowerCAmelCase : str = use_cache
_lowerCAmelCase : List[Any] = eos_token_id
_lowerCAmelCase : str = decoder_start_token_id
# for backwards compatibility
_lowerCAmelCase : Tuple = dense_act_fn
super().__init__(
pad_token_id=a__ , eos_token_id=a__ , decoder_start_token_id=a__ , tie_word_embeddings=a__ , is_decoder=a__ , **a__ , )
@classmethod
def __A ( cls , a__ , **a__ ):
cls._set_token_in_kwargs(a__ )
_lowerCAmelCase : Any = cls.get_config_dict(a__ , **a__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_lowerCAmelCase : str = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a__ , **a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = "pix2struct_vision_model"
def __init__( self , a__=768 , a__=768 , a__=2048 , a__=64 , a__=12 , a__=12 , a__="gelu_new" , a__=1e-6 , a__=0.0 , a__=0.0 , a__=1e-10 , a__=1.0 , a__=4096 , a__=32 , a__=128 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : Optional[Any] = patch_embed_hidden_size
_lowerCAmelCase : Any = d_ff
_lowerCAmelCase : Optional[Any] = dropout_rate
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Tuple = initializer_range
_lowerCAmelCase : Optional[Any] = initializer_factor
_lowerCAmelCase : Optional[Any] = attention_dropout
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : int = dense_act_fn
_lowerCAmelCase : Union[str, Any] = seq_len
_lowerCAmelCase : Optional[Any] = relative_attention_num_buckets
_lowerCAmelCase : Dict = relative_attention_max_distance
_lowerCAmelCase : Union[str, Any] = d_kv
@classmethod
def __A ( cls , a__ , **a__ ):
cls._set_token_in_kwargs(a__ )
_lowerCAmelCase : Tuple = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
_lowerCAmelCase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(a__ , **a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = "pix2struct"
_UpperCamelCase : int = True
def __init__( self , a__=None , a__=None , a__=1.0 , a__=0.0_2 , a__=False , a__=False , a__=True , **a__ , ):
super().__init__(tie_word_embeddings=a__ , is_encoder_decoder=a__ , **a__ )
if text_config is None:
_lowerCAmelCase : Any = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
_lowerCAmelCase : List[str] = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
_lowerCAmelCase : Tuple = PixaStructTextConfig(**a__ )
_lowerCAmelCase : List[str] = PixaStructVisionConfig(**a__ )
_lowerCAmelCase : Optional[int] = self.text_config.decoder_start_token_id
_lowerCAmelCase : List[str] = self.text_config.pad_token_id
_lowerCAmelCase : Dict = self.text_config.eos_token_id
_lowerCAmelCase : Optional[int] = initializer_factor
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Optional[Any] = self.initializer_range
_lowerCAmelCase : Union[str, Any] = self.initializer_range
_lowerCAmelCase : List[Any] = is_vqa
@classmethod
def __A ( cls , a__ , a__ , **a__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : Optional[Any] = self.text_config.to_dict()
_lowerCAmelCase : Union[str, Any] = self.vision_config.to_dict()
_lowerCAmelCase : Any = self.__class__.model_type
return output
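# Added usage sketch. The three classes above are all renamed to `__A` here;
# upstream they are Pix2StructTextConfig, Pix2StructVisionConfig and
# Pix2StructConfig (assumed names), which is what the import below uses:
if __name__ == "__main__":
    from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

    demo_text = Pix2StructTextConfig(num_layers=4, num_heads=4)
    demo_vision = Pix2StructVisionConfig(num_hidden_layers=4, num_attention_heads=4)
    # mirrors the composite classmethod defined above
    demo_config = Pix2StructConfig.from_text_vision_configs(demo_text, demo_vision)
    print(sorted(demo_config.to_dict().keys()))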
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
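# The _LazyModule above defers the sentencepiece import until the tokenizer
# is first accessed. Equivalent eager usage (checkpoint id is illustrative):
#
#   from transformers import GPTSw3Tokenizer
#   tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden-Models/gpt-sw3-126m")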
| 663 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
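# Added inference sketch matching the slow test above (downloads the
# facebook/DiT-XL-2-256 weights and assumes a CUDA device is available):
if __name__ == "__main__":
    demo_pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    demo_pipe.to("cuda")
    class_ids = demo_pipe.get_label_ids(["white shark"])
    demo_generator = torch.manual_seed(0)
    # class-conditional sampling: ImageNet label ids in, PIL images out
    demo_image = demo_pipe(class_ids, generator=demo_generator, num_inference_steps=25).images[0]
    demo_image.save("white_shark.png")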
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@register_to_config
def __init__( self , a__ = 128 , a__ = 256 , a__ = 2000.0 , a__ = 768 , a__ = 12 , a__ = 12 , a__ = 64 , a__ = 2048 , a__ = 0.1 , ):
super().__init__()
_lowerCAmelCase : str = nn.Sequential(
nn.Linear(a__ , d_model * 4 , bias=a__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=a__ ) , nn.SiLU() , )
_lowerCAmelCase : Optional[Any] = nn.Embedding(a__ , a__ )
_lowerCAmelCase : str = False
_lowerCAmelCase : Optional[int] = nn.Linear(a__ , a__ , bias=a__ )
_lowerCAmelCase : int = nn.Dropout(p=a__ )
_lowerCAmelCase : str = nn.ModuleList()
for lyr_num in range(a__ ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Optional[int] = DecoderLayer(d_model=a__ , d_kv=a__ , num_heads=a__ , d_ff=a__ , dropout_rate=a__ )
self.decoders.append(a__ )
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(a__ )
_lowerCAmelCase : List[str] = nn.Dropout(p=a__ )
_lowerCAmelCase : Dict = nn.Linear(a__ , a__ , bias=a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
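    # Shape sketch for the helper above (added note): with query_input of shape
    # (batch, query_len) and key_input of shape (batch, key_len), the two
    # unsqueezes broadcast the elementwise product to (batch, query_len, key_len),
    # and unsqueeze(-3) adds a head axis, giving (batch, 1, query_len, key_len).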
    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer ( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1e-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond ( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention ( nn.Module ):
    def __init__( self , d_model , d_kv , num_heads , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , key_value_states=None , attention_mask=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond ( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate , layer_norm_epsilon ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self , hidden_states , conditioning_emb=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense ( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ):
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm ( nn.Module ):
    def __init__( self , hidden_size , eps=1e-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
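# Editor's note: a minimal, editor-added numerical sketch of the RMS-style norm above
# (weight fixed to 1, float32 input assumed): y = x / sqrt(mean(x**2, dim=-1) + eps).
def _rms_norm_sketch(x , eps=1e-6 ):
    variance = x.pow(2 ).mean(-1 , keepdim=True )
    return x * torch.rsqrt(variance + eps )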
class NewGELUActivation ( nn.Module ):
    def forward( self , input ):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(input , 3.0 )) ))
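# Editor's note (added comment): NewGELUActivation implements the tanh approximation of
# GELU, 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3))), as used in the original
# BERT/GPT-2 codebases; see https://arxiv.org/abs/1606.08415.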
class TaFiLMLayer ( nn.Module ):
    def __init__( self , in_features , out_features ):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ):
        emb = self.scale_bias(conditioning_emb )
        scale, shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
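# Editor's note: an editor-added sketch of the FiLM (feature-wise linear modulation)
# mechanism implemented by TaFiLMLayer above; the shapes are illustrative assumptions.
def _film_sketch():
    film = TaFiLMLayer(in_features=8 , out_features=4 )
    x = torch.randn(2 , 3 , 4 )  # (batch, seq, channels)
    conditioning_emb = torch.randn(2 , 1 , 8 )  # broadcast over the seq dimension
    return film(x , conditioning_emb )  # scale/shift applied per channel; shape (2, 3, 4)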
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class SegformerFeatureExtractor ( SegformerImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester :
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model( self ):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners , token ):
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
    def list_str(values ):
        return values.split("," )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
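    # Editor's note: editor-added usage illustration; the script filename and runner
    # names are hypothetical:
    #   python get_runner_status.py --target_runners ci-runner-1,ci-runner-2 --token <github-pat>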
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization( self ):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_bert_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bert_tokens )
        reconstructed_text = tokenizer.decode(input_bert_tokens )
        self.assertEqual(reconstructed_text , normalized_text )
| 720 |
"""simple docstring"""
def solution(limit: int = 1000000 ) -> int:
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
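# Editor's note: editor-added sanity check. Euler's totient counts reduced proper
# fractions per denominator, so for limit=8 the expected count is
# phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21.
assert solution(8) == 21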
if __name__ == "__main__":
print(solution())
| 663 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def test_feed_forward_chunking( self ):
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
    def test_model_from_pretrained( self ):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
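    # Editor's note: editor-added usage illustration; the checkpoint and output paths
    # are hypothetical:
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #     --checkpoint_path ./v1-5-pruned-emaonly.ckpt --dump_path ./sd15 --extract_ema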
| 700 |
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image , level: int ) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int ) -> int:
        return int(128 + factor * (c - 128) )
    return img.point(contrast )
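# Editor's note: editor-added worked example of the contrast factor. For level=170,
# factor = 259 * (170 + 255) / (255 * (259 - 170)) ≈ 4.85, so a bright pixel of 200
# maps to int(128 + 4.85 * (200 - 128)) ≈ 477, which PIL then clamps to 255.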
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 0 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule ( FlaxBigBirdForQuestionAnsweringModule ):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup( self ):
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )
    def __call__( self , *args , **kwargs ):
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions ( FlaxBigBirdForQuestionAnswering ):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ):
    def cross_entropy(logits , labels , reduction=None ):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes )[None]).astype("f4" )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
    return (start_loss + end_loss + pooled_loss) / 3
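# Editor's note: an editor-added, self-contained sketch of the combined loss above;
# the shapes (seq_len=4, five answer categories) are assumptions for illustration only.
def _nq_loss_sketch():
    rng = jax.random.PRNGKey(0 )
    start_logits = jax.random.normal(rng , (2, 4) )
    end_logits = jax.random.normal(rng , (2, 4) )
    pooled_logits = jax.random.normal(rng , (2, 5) )
    start_labels = jnp.array([0, 3] )
    end_labels = jnp.array([1, 3] )
    pooled_labels = jnp.array([2, 4] )
    # averages the three per-head cross entropies into one scalar
    return calculate_loss_for_nq(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )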
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__( self ):
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs
    def __call__( self , batch ):
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch
    def collate_fn( self , features ):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"] )
        batch = {
            "input_ids": jnp.array(input_ids , dtype=jnp.int32 ),
            "attention_mask": jnp.array(attention_mask , dtype=jnp.int32 ),
            "start_labels": jnp.array(features["start_token"] , dtype=jnp.int32 ),
            "end_labels": jnp.array(features["end_token"] , dtype=jnp.int32 ),
            "pooled_labels": jnp.array(features["category"] , dtype=jnp.int32 ),
        }
        return batch
    def fetch_inputs( self , input_ids ):
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self , input_ids ):
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset(dataset , batch_size , seed=None ):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name="batch" )
def train_step(state , drp_rng , **model_inputs ):
    def loss_fn(params ):
        start_labels = model_inputs.pop("start_labels" )
        end_labels = model_inputs.pop("end_labels" )
        pooled_labels = model_inputs.pop("pooled_labels" )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )
    drp_rng, new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss, grads = grad_fn(state.params )
    metrics = jax.lax.pmean({"loss": loss} , axis_name="batch" )
    grads = jax.lax.pmean(grads , "batch" )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def val_step(state , **model_inputs ):
    start_labels = model_inputs.pop("start_labels" )
    end_labels = model_inputs.pop("end_labels" )
    pooled_labels = model_inputs.pop("pooled_labels" )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({"loss": loss} , axis_name="batch" )
    return metrics
class TrainState ( train_state.TrainState ):
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state( self , model , tx , num_train_steps , ckpt_dir=None ):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir , state )
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args )
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state )
        return state
    def train( self , state , tr_dataset , val_dataset ):
        args = self.args
        total = len(tr_dataset ) // args.batch_size
        rng = jax.random.PRNGKey(0 )
        drp_rng = jax.random.split(rng , jax.device_count() )
        for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch )
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=F"Running EPOCH-{epoch}" ):
                batch = self.data_collator(batch )
                state, metrics, drp_rng = self.train_step_fn(state , drp_rng , **batch )
                running_loss += jax_utils.unreplicate(metrics["loss"] )
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step )
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1 )
                    eval_loss = self.evaluate(state , val_dataset )
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict ) )
                    self.logger.log(logging_dict , commit=True )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F"-e{epoch}-s{i}" , state=state )
    def evaluate( self , state , dataset ):
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc="Evaluating ... " ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics["loss"] )
            i += 1
        return running_loss / i
    def save_checkpoint( self , save_dir , state ):
        state = jax_utils.unreplicate(state )
        print(F"SAVING CHECKPOINT IN {save_dir}" , end=" ... " )
        self.model_save_fn(save_dir , params=state.params )
        with open(os.path.join(save_dir , "opt_state.msgpack" ) , "wb" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , "args.joblib" ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , "data_collator.joblib" ) )
        with open(os.path.join(save_dir , "training_state.json" ) , "w" ) as f:
            json.dump({"step": state.step.item()} , f )
        print("DONE" )
def restore_checkpoint(save_dir , state ):
    print(f"RESTORING CHECKPOINT FROM {save_dir}" , end=" ... " )
    with open(os.path.join(save_dir , "flax_model.msgpack" ) , "rb" ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , "opt_state.msgpack" ) , "rb" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , "args.joblib" ) )
    data_collator = joblib.load(os.path.join(save_dir , "data_collator.joblib" ) )
    with open(os.path.join(save_dir , "training_state.json" ) , "r" ) as f:
        training_state = json.load(f )
    step = training_state["step"]
    print("DONE" )
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr , init_lr , warmup_steps , num_train_steps ):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1e-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
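# Editor's note: editor-added illustration of the schedule above. With init_lr=0.0,
# lr=3e-5, warmup_steps=2 and num_train_steps=6, the joined schedule climbs linearly to
# 3e-5 over the first 2 steps, then decays linearly toward 1e-7 over the remaining 4:
#   sched = scheduler_fn(3e-5, 0.0, 2, 6)
#   [float(sched(t)) for t in range(6)]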
def build_tx(lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    def weight_decay_mask(params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
| 701 |
"""simple docstring"""
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class FixedPriorityQueue:
    def __init__( self ):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority , data ):
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError("Maximum queue size is 100" )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2" )
    def dequeue( self ):
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError("All queues are empty" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    def __init__( self ):
        self.queue = []
    def enqueue( self , data ):
        if len(self.queue ) == 100:
            raise OverFlowError("Maximum queue size is 100" )
        self.queue.append(data )
    def dequeue( self ):
        if not self.queue:
            raise UnderFlowError("The queue is empty" )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
def __str__( self ):
return str(self.queue )
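# Editor's note (added): dequeue() above scans the list for its minimum, so it is O(n)
# per call; a binary heap (Python's heapq) would give O(log n) at the cost of the plain
# list representation that __str__ prints.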
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> list[int]:
_lowerCAmelCase : Any = [True] * limit
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : List[Any] = True
for i in range(3 ,int(limit**0.5 + 1 ) ,2 ):
_lowerCAmelCase : str = i * 2
while index < limit:
_lowerCAmelCase : Any = False
_lowerCAmelCase : str = index + i
_lowerCAmelCase : Tuple = [2]
for i in range(3 ,_lowerCamelCase ,2 ):
if is_prime[i]:
primes.append(_lowerCamelCase )
return primes
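# Sieve of Eratosthenes that only reads back odd indices (2 is seeded into
# the result list explicitly); e.g. prime_sieve(10) -> [2, 3, 5, 7].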
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
_lowerCAmelCase : Union[str, Any] = prime_sieve(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : List[str] = 0
for i in range(len(_lowerCamelCase ) ):
for j in range(i + length ,len(_lowerCamelCase ) ):
_lowerCAmelCase : str = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
_lowerCAmelCase : List[Any] = j - i
_lowerCAmelCase : str = sol
return largest
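# Project Euler 50: find the prime below the ceiling that is the sum of the
# most consecutive primes. Starting `j` at `i + length` prunes any window no
# longer than the best found so far; solution(1_000_000) gives the known
# answer 997651.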
if __name__ == "__main__":
print(F"""{solution() = }""")
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with an audio array taken from a dummy dataset sample
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
_a : Optional[Any] = 'Tobias Carryer'
from time import time
class __A :
def __init__( self , a__ , a__ , a__ , a__=int(time() ) ): # noqa: B008
_lowerCAmelCase : Any = multiplier
_lowerCAmelCase : str = increment
_lowerCAmelCase : Union[str, Any] = modulo
_lowerCAmelCase : str = seed
def __A ( self ):
_lowerCAmelCase : List[Any] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
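# Classic linear congruential generator: seed_{n+1} = (a * seed_n + c) mod m.
# The demo below uses the well-known Numerical Recipes parameters
# a=1664525, c=1013904223 with modulus 2 << 31 == 2**32.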
if __name__ == "__main__":
# Show the LCG in action.
_a : Optional[Any] = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
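# Maps a requested pixel size onto the MoVQ latent grid via
# ceil(dim / scale_factor**2) * scale_factor; e.g. 768x768 with
# scale_factor=8 becomes a 96x96 latent.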
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
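# Preprocessing sketch: bicubic-resize to (w, h), scale RGB pixels from
# [0, 255] to [-1, 1] via arr / 127.5 - 1, reorder HWC -> CHW and add a
# batch dimension.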
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
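# Standard img2img timestep truncation: keep only the last
# `strength * num_inference_steps` timesteps, so strength=1.0 runs the full
# schedule while strength=0.3 denoises over roughly the final 30% of it.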
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_a : int = logging.get_logger(__name__)
@dataclass
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **a__ ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCAmelCase : Union[str, Any] = deprecated_arg[3:]
_lowerCAmelCase : Optional[Any] = not kwargs.pop(a__ )
logger.warning(
F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
F" {positive_arg}={kwargs[positive_arg]}" )
_lowerCAmelCase : Dict = kwargs.pop("""tpu_name""" , self.tpu_name )
_lowerCAmelCase : Optional[Any] = kwargs.pop("""device_idx""" , self.device_idx )
_lowerCAmelCase : int = kwargs.pop("""eager_mode""" , self.eager_mode )
_lowerCAmelCase : List[str] = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**a__ )
_UpperCamelCase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Name of TPU"} , )
_UpperCamelCase : int = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Benchmark models in eager model."} )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def __A ( self ):
requires_backends(self , ["""tf"""] )
_lowerCAmelCase : int = None
if self.tpu:
try:
if self.tpu_name:
_lowerCAmelCase : int = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCAmelCase : int = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCAmelCase : List[Any] = None
return tpu
@cached_property
def __A ( self ):
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCAmelCase : List[str] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
_lowerCAmelCase : Dict = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
_lowerCAmelCase : str = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}" )
return strategy
@property
def __A ( self ):
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def __A ( self ):
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def __A ( self ):
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def __A ( self ):
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __A ( self ):
return self.n_gpu > 0
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : List[str] = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Any = _ask_options(
"""In which compute environment are you running?""" ,["""This machine""", """AWS (Amazon SageMaker)"""] ,_convert_compute_environment ,)
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase : str = get_sagemaker_input()
else:
_lowerCAmelCase : Union[str, Any] = get_cluster_input()
return config
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any]=None ) -> Optional[Any]:
if subparsers is not None:
_lowerCAmelCase : Union[str, Any] = subparsers.add_parser("""config""" ,description=_lowerCamelCase )
else:
_lowerCAmelCase : Tuple = argparse.ArgumentParser("""Accelerate config command""" ,description=_lowerCamelCase )
parser.add_argument(
"""--config_file""" ,default=_lowerCamelCase ,help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) ,)
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase )
return parser
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> List[Any]:
_lowerCAmelCase : Optional[Any] = get_user_input()
if args.config_file is not None:
_lowerCAmelCase : Tuple = args.config_file
else:
if not os.path.isdir(_lowerCamelCase ):
os.makedirs(_lowerCamelCase )
_lowerCAmelCase : List[Any] = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(_lowerCamelCase )
else:
config.to_yaml_file(_lowerCamelCase )
print(f"accelerate configuration saved at {config_file}" )
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : List[str] = config_command_parser()
_lowerCAmelCase : List[Any] = parser.parse_args()
config_command(_lowerCamelCase )
if __name__ == "__main__":
main()
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Tuple = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
_a : Optional[Any] = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
_a : List[str] = {
'ctrl': 256,
}
_a : List[Any] = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Optional[int]:
_lowerCAmelCase : str = set()
_lowerCAmelCase : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : List[str] = char
_lowerCAmelCase : str = set(_lowerCamelCase )
return pairs
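# Example: get_pairs(("l", "o", "w", "er</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "er</w>")}.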
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : int = CONTROL_CODES
def __init__( self , a__ , a__ , a__="<unk>" , **a__ ):
super().__init__(unk_token=a__ , **a__ )
with open(a__ , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase : Union[str, Any] = json.load(a__ )
_lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
with open(a__ , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase : List[str] = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase : List[Any] = [tuple(merge.split() ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : Optional[int] = {}
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , a__ ):
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : int = tuple(a__ )
_lowerCAmelCase : Tuple = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_lowerCAmelCase : List[str] = get_pairs(a__ )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(a__ , key=lambda a__ : self.bpe_ranks.get(a__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase : Optional[Any] = bigram
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : str = 0
while i < len(a__ ):
try:
_lowerCAmelCase : Dict = word.index(a__ , a__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Any = j
if word[i] == first and i < len(a__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Dict = tuple(a__ )
_lowerCAmelCase : List[Any] = new_word
if len(a__ ) == 1:
break
else:
_lowerCAmelCase : Optional[int] = get_pairs(a__ )
_lowerCAmelCase : Any = """@@ """.join(a__ )
_lowerCAmelCase : List[Any] = word[:-4]
_lowerCAmelCase : Optional[int] = word
return word
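# Standard BPE merge loop: repeatedly merge the adjacent pair with the lowest
# merge rank until no mergeable pair remains, join the subwords with the
# "@@ " continuation marker, and strip the trailing "</w>" end-of-word marker
# via word[:-4]; the final string is cached per token for reuse.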
def __A ( self , a__ ):
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = re.findall(r"""\S+\n?""" , a__ )
for token in words:
split_tokens.extend(list(self.bpe(a__ ).split(""" """ ) ) )
return split_tokens
def __A ( self , a__ ):
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def __A ( self , a__ ):
return self.decoder.get(a__ , self.unk_token )
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = """ """.join(a__ ).replace("""@@ """ , """""" ).strip()
return out_string
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Any = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(a__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a__ , ensure_ascii=a__ ) + """\n""" )
_lowerCAmelCase : Optional[int] = 0
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a__ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase : List[Any] = token_index
writer.write(""" """.join(a__ ) + """\n""" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : NDArray[floataa] ,_lowerCamelCase : NDArray[floataa] ,_lowerCamelCase : list[int] ,_lowerCamelCase : int ,) -> list[float]:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = coefficient_matrix.shape
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = constant_matrix.shape
if rowsa != colsa:
_lowerCAmelCase : int = f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
raise ValueError(_lowerCamelCase )
if colsa != 1:
_lowerCAmelCase : str = f"Constant matrix must be nx1 but received {rowsa}x{colsa}"
raise ValueError(_lowerCamelCase )
if rowsa != rowsa:
_lowerCAmelCase : Any = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
f"received {rowsa}x{colsa} and {rowsa}x{colsa}"
)
raise ValueError(_lowerCamelCase )
if len(_lowerCamelCase ) != rowsa:
_lowerCAmelCase : Dict = (
"""Number of initial values must be equal to number of rows in coefficient """
f"matrix but received {len(_lowerCamelCase )} and {rowsa}"
)
raise ValueError(_lowerCamelCase )
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""" )
_lowerCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) ,axis=1 )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = table.shape
strictly_diagonally_dominant(_lowerCamelCase )
# Iterates the whole matrix for given number of times
for _ in range(_lowerCamelCase ):
_lowerCAmelCase : int = []
for row in range(_lowerCamelCase ):
_lowerCAmelCase : str = 0
for col in range(_lowerCamelCase ):
if col == row:
_lowerCAmelCase : int = table[row][col]
elif col == cols - 1:
_lowerCAmelCase : Tuple = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
_lowerCAmelCase : List[str] = (temp + val) / denom
new_val.append(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = new_val
return [float(_lowerCamelCase ) for i in new_val]
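# Each sweep applies the Jacobi update
#     x_i^(k+1) = (b_i - sum_{j != i} a_ij * x_j^(k)) / a_ii
# using only values from the previous iterate, which is why `new_val` is
# built from `init_val` and swapped in wholesale after every row is done.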
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : NDArray[floataa] ) -> bool:
_lowerCAmelCase , _lowerCAmelCase : Any = table.shape
_lowerCAmelCase : Union[str, Any] = True
for i in range(0 ,_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = 0
for j in range(0 ,cols - 1 ):
if i == j:
continue
else:
total += abs(table[i][j] )
if abs(table[i][i] ) <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
return is_diagonally_dominant
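# Strict diagonal dominance, |a_ii| > sum_{j != i} |a_ij| for every row, is a
# sufficient (though not necessary) condition for the Jacobi iteration to
# converge, and it also guarantees a nonzero diagonal for the division above.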
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Dict ) -> int:
# Initialise PyTorch model
_lowerCAmelCase : Any = FunnelConfig.from_json_file(_lowerCamelCase )
print(f"Building PyTorch model from configuration: {config}" )
_lowerCAmelCase : Optional[int] = FunnelBaseModel(_lowerCamelCase ) if base_model else FunnelModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() ,_lowerCamelCase )
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
_a : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 708 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
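# A perfect number equals the sum of its proper divisors,
# e.g. perfect(28) is True since 28 == 1 + 2 + 4 + 7 + 14; the check scans
# divisors up to number // 2, so it runs in O(number) time.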
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a : str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> List[str]:
_lowerCAmelCase : List[Any] = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
_lowerCAmelCase : List[Any] = 1024
_lowerCAmelCase : Tuple = 4096
_lowerCAmelCase : Optional[int] = 24
_lowerCAmelCase : Any = 16
_lowerCAmelCase : Any = [5, 11, 17, 23]
_lowerCAmelCase : List[str] = [256, 512, 1024, 1024]
_lowerCAmelCase : Dict = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowerCAmelCase : Tuple = 768
_lowerCAmelCase : List[str] = [1, 1, 1, 0.5]
_lowerCAmelCase : int = [256, 512, 768, 768]
_lowerCAmelCase : Optional[Any] = 150
_lowerCAmelCase : Optional[Any] = 16
_lowerCAmelCase : Dict = (1, 384, 384)
_lowerCAmelCase : str = False
_lowerCAmelCase : int = """project"""
if "ade" in checkpoint_url:
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 768
_lowerCAmelCase : Dict = [1, 1, 1, 0.5]
_lowerCAmelCase : Any = 150
_lowerCAmelCase : str = 16
_lowerCAmelCase : Dict = """huggingface/label-files"""
_lowerCAmelCase : Optional[int] = """ade20k-id2label.json"""
_lowerCAmelCase : Optional[Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase ,_lowerCamelCase ,repo_type="""dataset""" ) ) ,"""r""" ) )
_lowerCAmelCase : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : List[Any] = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : List[Any] = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Union[str, Any]:
_lowerCAmelCase : Union[str, Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Dict:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowerCAmelCase : List[Any] = name.replace("""pretrained.model""" ,"""dpt.encoder""" )
if "pretrained.model" in name:
_lowerCAmelCase : int = name.replace("""pretrained.model""" ,"""dpt.embeddings""" )
if "patch_embed" in name:
_lowerCAmelCase : str = name.replace("""patch_embed""" ,"""""" )
if "pos_embed" in name:
_lowerCAmelCase : Optional[int] = name.replace("""pos_embed""" ,"""position_embeddings""" )
if "attn.proj" in name:
_lowerCAmelCase : int = name.replace("""attn.proj""" ,"""attention.output.dense""" )
if "proj" in name and "project" not in name:
_lowerCAmelCase : int = name.replace("""proj""" ,"""projection""" )
if "blocks" in name:
_lowerCAmelCase : str = name.replace("""blocks""" ,"""layer""" )
if "mlp.fc1" in name:
_lowerCAmelCase : List[Any] = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
if "mlp.fc2" in name:
_lowerCAmelCase : int = name.replace("""mlp.fc2""" ,"""output.dense""" )
if "norm1" in name and "backbone" not in name:
_lowerCAmelCase : str = name.replace("""norm1""" ,"""layernorm_before""" )
if "norm2" in name and "backbone" not in name:
_lowerCAmelCase : int = name.replace("""norm2""" ,"""layernorm_after""" )
if "scratch.output_conv" in name:
_lowerCAmelCase : str = name.replace("""scratch.output_conv""" ,"""head""" )
if "scratch" in name:
_lowerCAmelCase : List[str] = name.replace("""scratch""" ,"""neck""" )
if "layer1_rn" in name:
_lowerCAmelCase : Dict = name.replace("""layer1_rn""" ,"""convs.0""" )
if "layer2_rn" in name:
_lowerCAmelCase : List[Any] = name.replace("""layer2_rn""" ,"""convs.1""" )
if "layer3_rn" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""layer3_rn""" ,"""convs.2""" )
if "layer4_rn" in name:
_lowerCAmelCase : List[str] = name.replace("""layer4_rn""" ,"""convs.3""" )
if "refinenet" in name:
_lowerCAmelCase : Optional[Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowerCAmelCase : List[str] = name.replace(f"refinenet{layer_idx}" ,f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""out_conv""" ,"""projection""" )
if "resConfUnit1" in name:
_lowerCAmelCase : Optional[int] = name.replace("""resConfUnit1""" ,"""residual_layer1""" )
if "resConfUnit2" in name:
_lowerCAmelCase : Optional[int] = name.replace("""resConfUnit2""" ,"""residual_layer2""" )
if "conv1" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""conv1""" ,"""convolution1""" )
if "conv2" in name:
_lowerCAmelCase : Optional[Any] = name.replace("""conv2""" ,"""convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowerCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess1.0.project.0""" ,"""neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowerCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess2.0.project.0""" ,"""neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowerCAmelCase : List[Any] = name.replace("""pretrained.act_postprocess3.0.project.0""" ,"""neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowerCAmelCase : int = name.replace("""pretrained.act_postprocess4.0.project.0""" ,"""neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowerCAmelCase : int = name.replace("""pretrained.act_postprocess1.3""" ,"""neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
_lowerCAmelCase : List[str] = name.replace("""pretrained.act_postprocess1.4""" ,"""neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
_lowerCAmelCase : List[str] = name.replace("""pretrained.act_postprocess2.3""" ,"""neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""pretrained.act_postprocess2.4""" ,"""neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
_lowerCAmelCase : Any = name.replace("""pretrained.act_postprocess3.3""" ,"""neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
_lowerCAmelCase : Optional[int] = name.replace("""pretrained.act_postprocess4.3""" ,"""neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
_lowerCAmelCase : Dict = name.replace("""pretrained.act_postprocess4.4""" ,"""neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
_lowerCAmelCase : List[Any] = name.replace("""pretrained""" ,"""dpt""" )
if "bn" in name:
_lowerCAmelCase : Tuple = name.replace("""bn""" ,"""batch_norm""" )
if "head" in name:
_lowerCAmelCase : Dict = name.replace("""head""" ,"""head.head""" )
if "encoder.norm" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""encoder.norm""" ,"""layernorm""" )
if "auxlayer" in name:
_lowerCAmelCase : Any = name.replace("""auxlayer""" ,"""auxiliary_head.head""" )
if "backbone" in name:
_lowerCAmelCase : int = name.replace("""backbone""" ,"""backbone.bit.encoder""" )
if ".." in name:
_lowerCAmelCase : Dict = name.replace("""..""" ,""".""" )
if "stem.conv" in name:
_lowerCAmelCase : Any = name.replace("""stem.conv""" ,"""bit.embedder.convolution""" )
if "blocks" in name:
_lowerCAmelCase : List[str] = name.replace("""blocks""" ,"""layers""" )
if "convolution" in name and "backbone" in name:
_lowerCAmelCase : str = name.replace("""convolution""" ,"""conv""" )
if "layer" in name and "backbone" in name:
_lowerCAmelCase : Any = name.replace("""layer""" ,"""layers""" )
if "backbone.bit.encoder.bit" in name:
_lowerCAmelCase : str = name.replace("""backbone.bit.encoder.bit""" ,"""backbone.bit""" )
if "embedder.conv" in name:
_lowerCAmelCase : List[str] = name.replace("""embedder.conv""" ,"""embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
_lowerCAmelCase : List[str] = name.replace("""backbone.bit.encoder.stem.norm""" ,"""backbone.bit.embedder.norm""" )
return name
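def _example_refinenet_index_mapping():
    # Illustrative sketch only (not part of the original conversion script):
    # the "tricky" refinenet renaming above reverses the stage order, so
    # timm's refinenet4..refinenet1 land on fusion_stage.layers.0..3.
    for layer_idx in range(1, 5):
        print(f"refinenet{layer_idx} -> fusion_stage.layers.{abs(layer_idx - 4)}")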
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Optional[Any] ) -> List[Any]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : str = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
_lowerCAmelCase : Optional[int] = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Any = in_proj_weight[: config.hidden_size, :]
_lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
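def _example_qkv_split(hidden_size: int = 4):
    # Minimal sketch with dummy tensors (not real checkpoint weights) showing
    # the slicing above: a fused qkv matrix of shape (3 * hidden, hidden) is
    # cut into equal query, key and value blocks along the first dimension.
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]
    assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)
    return query, key, value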
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Tuple ,_lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Tuple = get_dpt_config(_lowerCamelCase )
    # load original state_dict (here from a local checkpoint file; the hub-download variant is kept for reference)
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowerCAmelCase : Optional[Any] = torch.load(_lowerCamelCase ,map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(_lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
_lowerCAmelCase : List[Any] = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase ,_lowerCamelCase )
# load HuggingFace model
_lowerCAmelCase : List[str] = DPTForSemanticSegmentation(_lowerCamelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# Check outputs on an image
_lowerCAmelCase : Union[str, Any] = 480 if """ade""" in checkpoint_url else 384
_lowerCAmelCase : Optional[Any] = DPTImageProcessor(size=_lowerCamelCase )
_lowerCAmelCase : List[Any] = prepare_img()
_lowerCAmelCase : Optional[Any] = image_processor(_lowerCamelCase ,return_tensors="""pt""" )
# forward pass
_lowerCAmelCase : Optional[Any] = model(**_lowerCamelCase ).logits if """ade""" in checkpoint_url else model(**_lowerCamelCase ).predicted_depth
if show_prediction:
_lowerCAmelCase : Union[str, Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) ,size=(image.size[1], image.size[0]) ,mode="""bicubic""" ,align_corners=_lowerCamelCase ,)
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
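def _example_depth_visualisation(predicted_depth):
    # Minimal sketch (hypothetical input array) of the preview logic above:
    # a raw depth map is rescaled to 0-255 and rendered as an 8-bit greyscale
    # image. Assumes PIL's Image import at the top of this script (it is used
    # by prepare_img above); numpy is imported locally.
    import numpy as np

    depth = np.asarray(predicted_depth, dtype=float)
    return Image.fromarray((depth / depth.max() * 255).astype("uint8"))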
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
_a : str = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __A :
_UpperCamelCase : int
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def SCREAMING_SNAKE_CASE ( ) -> Node | None:
_lowerCAmelCase : Tuple = Node(1 )
_lowerCAmelCase : int = Node(2 )
_lowerCAmelCase : int = Node(3 )
_lowerCAmelCase : Any = Node(4 )
_lowerCAmelCase : Dict = Node(5 )
return tree
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
if root is None:
return output
_lowerCAmelCase : Union[str, Any] = deque([root] )
while process_queue:
_lowerCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
_lowerCAmelCase : list[Sequence[Node | None]] = []
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = height(_lowerCamelCase )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = 0
return output
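def _example_sample_tree() -> Node:
    # Illustrative only: one plausible linking of the five nodes created by
    # the builder above (1 at the root, 2 and 3 as its children, 4 and 5
    # under 2). With that shape the traversals above yield preorder
    # [1, 2, 4, 5, 3], inorder [4, 2, 5, 1, 3], postorder [4, 5, 2, 3, 1],
    # level order [1, 2, 3, 4, 5] and zigzag [[1], [3, 2], [4, 5]].
    root = Node(1)
    root.left, root.right = Node(2), Node(3)
    root.left.left, root.left.right = Node(4), Node(5)
    return root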
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : int = make_tree()
print(f"In-order Traversal: {inorder(_lowerCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_lowerCamelCase )}" )
print(f"Post-order Traversal: {postorder(_lowerCamelCase )}" ,"""\n""" )
print(f"Height of Tree: {height(_lowerCamelCase )}" ,"""\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) ,"""\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 ,height(_lowerCamelCase ) + 1 ):
print(f"Level {level}:" ,get_nodes_from_left_to_right(_lowerCamelCase ,level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 0 |
"""simple docstring"""
import os
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(_lowerCamelCase ) ,_lowerCamelCase ) ) as input_file:
_lowerCAmelCase : Dict = [
[int(_lowerCamelCase ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
_lowerCAmelCase : str = len(_lowerCamelCase )
_lowerCAmelCase : Tuple = len(matrix[0] )
_lowerCAmelCase : Dict = [[-1 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = matrix[i][0]
for j in range(1 ,_lowerCamelCase ):
for i in range(_lowerCamelCase ):
_lowerCAmelCase : int = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 ,_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = min(
minimal_path_sums[i][j] ,minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 ,-1 ,-1 ):
_lowerCAmelCase : List[Any] = min(
minimal_path_sums[i][j] ,minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
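def _example_pickle_roundtrip(obj):
    # Illustrative helper (not part of the test above): a pickle round-trip is
    # the quickest way to check that a prepared object is still serialisable.
    return pickle.loads(pickle.dumps(obj))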
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A :
def __init__( self , a__ , a__=13 , a__=30 , a__=2 , a__=3 , a__=True , a__=True , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.0_2 , a__=3 , a__=None , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Union[str, Any] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Optional[int] = (image_size // patch_size) ** 2
_lowerCAmelCase : Tuple = num_patches + 1
def __A ( self ):
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFViTModel(config=a__ )
_lowerCAmelCase : Optional[Any] = model(a__ , training=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image whose size differs from the one specified in the config.
_lowerCAmelCase : str = self.image_size // 2
_lowerCAmelCase : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
_lowerCAmelCase : List[str] = model(a__ , interpolate_pos_encoding=a__ , training=a__ )
_lowerCAmelCase : int = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : List[str] = TFViTForImageClassification(a__ )
_lowerCAmelCase : Any = model(a__ , labels=a__ , training=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image whose size differs from the one specified in the config.
_lowerCAmelCase : Any = self.image_size // 2
_lowerCAmelCase : int = pixel_values[:, :, :image_size, :image_size]
_lowerCAmelCase : Dict = model(a__ , interpolate_pos_encoding=a__ , training=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Any = TFViTForImageClassification(a__ )
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
_lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
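def _example_vit_sequence_length(image_size: int = 30, patch_size: int = 2) -> int:
    # Illustrative only: with the tester defaults above, ViT sees
    # (30 // 2) ** 2 = 225 patches plus one [CLS] token, so the expected
    # sequence length is 226.
    return (image_size // patch_size) ** 2 + 1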
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_UpperCamelCase : List[str] = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[Any] = False
def __A ( self ):
_lowerCAmelCase : List[str] = TFViTModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCAmelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , tf.keras.layers.Layer ) )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(a__ )
_lowerCAmelCase : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE ( ) -> Dict:
_lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : Dict = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
_lowerCAmelCase : List[Any] = self.default_image_processor
_lowerCAmelCase : Union[str, Any] = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=a__ , return_tensors="""tf""" )
# forward pass
_lowerCAmelCase : Tuple = model(**a__ )
# verify the logits
_lowerCAmelCase : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
_lowerCAmelCase : Optional[Any] = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , a__ , atol=1e-4 )
| 711 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
        ##Variables for cluster assignments of individual vectors (initialized
        ##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
        ##Now let's construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
        # tf.sub was removed in TF 1.0; tf.subtract is the supported op
        _lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        _lowerCAmelCase : Optional[Any] = tf.global_variables_initializer()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
                # Compute the Euclidean distance between this vector and each
                # centroid. Note that this list cannot be named
                # 'centroid_distances', since that is the input to the
                # cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
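def _example_numpy_kmeans(vectors, noofclusters, noofiterations=100):
    # Minimal NumPy sketch (not part of the TF implementation above) of the
    # same Expectation-Maximization loop: assign every vector to its nearest
    # centroid, then move each centroid to the mean of its assigned vectors.
    import numpy as np

    data = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    centroids = data[rng.choice(len(data), size=noofclusters, replace=False)]
    assignments = np.zeros(len(data), dtype=int)
    for _ in range(noofiterations):
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        for k in range(noofclusters):
            members = data[assignments == k]
            if len(members):
                centroids[k] = members.mean(axis=0)
    return centroids, assignments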
| 663 | 0 |
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
_a : Optional[Any] = getLogger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : str ,_lowerCamelCase : str ,_lowerCamelCase : int = 8 ,_lowerCamelCase : int = 1024 ,_lowerCamelCase : Optional[Any]="val" ,_lowerCamelCase : List[str]=None ,_lowerCamelCase : List[str]=False ,_lowerCamelCase : Tuple="summarization" ,_lowerCamelCase : Dict=None ,_lowerCamelCase : int=1 ,_lowerCamelCase : Dict = None ,_lowerCamelCase : str="" ,**_lowerCamelCase : Optional[Any] ,) -> Dict:
_lowerCAmelCase : List[str] = str(_lowerCamelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""" ,rank=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = Path(_lowerCamelCase )
_lowerCAmelCase : List[Any] = save_dir.joinpath(f"rank_{local_rank}_output.json" )
torch.cuda.set_device(_lowerCamelCase )
_lowerCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase ).cuda()
if fpaa:
_lowerCAmelCase : Tuple = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_lowerCamelCase ,_lowerCamelCase ) # update config with task specific params
_lowerCAmelCase : List[Any] = generate_kwargs.pop("""num_beams""" ,model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_lowerCAmelCase : Optional[int] = num_return_sequences
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(_lowerCamelCase )
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
_lowerCAmelCase : List[str] = tokenizer.model_max_length
if prefix is None:
_lowerCAmelCase : Union[str, Any] = prefix or getattr(model.config ,"""prefix""" ,"""""" ) or """"""
_lowerCAmelCase : Union[str, Any] = SeqaSeqDataset(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,max_target_length=1024 ,type_path=_lowerCamelCase ,n_obs=_lowerCamelCase ,prefix=_lowerCamelCase ,**_lowerCamelCase ,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_lowerCAmelCase : List[str] = ds.make_sortish_sampler(_lowerCamelCase ,distributed=_lowerCamelCase ,add_extra_examples=_lowerCamelCase ,shuffle=_lowerCamelCase )
_lowerCAmelCase : Any = DataLoader(_lowerCamelCase ,sampler=_lowerCamelCase ,batch_size=_lowerCamelCase ,collate_fn=ds.collate_fn )
_lowerCAmelCase : int = []
for batch in tqdm(_lowerCamelCase ):
_lowerCAmelCase : str = model.generate(
input_ids=batch["""input_ids"""].to(model.device ) ,attention_mask=batch["""attention_mask"""].to(model.device ) ,num_return_sequences=_lowerCamelCase ,num_beams=_lowerCamelCase ,**_lowerCamelCase ,)
_lowerCAmelCase : str = tokenizer.batch_decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = batch["""ids"""]
if num_return_sequences > 1:
_lowerCAmelCase : List[Any] = chunks(_lowerCamelCase ,_lowerCamelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_lowerCamelCase ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(_lowerCamelCase ,_lowerCamelCase )
return results, sampler.num_replicas
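def _example_candidate_chunking(n: int = 2):
    # Minimal sketch (made-up strings) of the chunking above: with
    # num_return_sequences=2, six decoded candidates collapse back into
    # three examples of two candidates each.
    flat_preds = ["a1", "a2", "b1", "b2", "c1", "c2"]
    return [flat_preds[i : i + n] for i in range(0, len(flat_preds), n)]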
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : List[Any] = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""" ,type=_lowerCamelCase ,help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""" ,type=_lowerCamelCase ,help="""like facebook/bart-large-cnn,t5-base, etc.""" ,default="""sshleifer/distilbart-xsum-12-3""" ,)
parser.add_argument("""--save_dir""" ,type=_lowerCamelCase ,help="""where to save""" ,default="""tmp_gen""" )
parser.add_argument("""--max_source_length""" ,type=_lowerCamelCase ,default=_lowerCamelCase )
parser.add_argument(
"""--type_path""" ,type=_lowerCamelCase ,default="""test""" ,help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""" ,type=_lowerCamelCase ,default="""summarization""" ,help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" ,type=_lowerCamelCase ,default=8 ,required=_lowerCamelCase ,help="""batch size""" )
parser.add_argument(
"""--local_rank""" ,type=_lowerCamelCase ,default=-1 ,required=_lowerCamelCase ,help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,required=_lowerCamelCase ,help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""" ,type=_lowerCamelCase ,default=1 ,required=_lowerCamelCase ,help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""" ,type=_lowerCamelCase ,default=600 ,required=_lowerCamelCase ,help="""How long should master process wait for other processes to finish.""" ,)
parser.add_argument("""--src_lang""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,required=_lowerCamelCase )
parser.add_argument("""--tgt_lang""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,required=_lowerCamelCase )
parser.add_argument(
"""--prefix""" ,type=_lowerCamelCase ,required=_lowerCamelCase ,default=_lowerCamelCase ,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""" ,action="""store_true""" )
parser.add_argument("""--debug""" ,action="""store_true""" )
_lowerCAmelCase : List[Any] = time.time()
_lowerCAmelCase : str = parser.parse_known_args()
_lowerCAmelCase : Union[str, Any] = parse_numeric_n_bool_cl_kwargs(_lowerCamelCase )
if generate_kwargs and args.local_rank <= 0:
print(f"parsed the following generate kwargs: {generate_kwargs}" )
_lowerCAmelCase : str = Path(args.save_dir + """_tmp""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) # this handles locking.
_lowerCAmelCase : Any = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f"Found files at {json_save_dir} please move or remove them." )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_lowerCAmelCase : Union[str, Any] = {}
if args.src_lang is not None:
_lowerCAmelCase : Any = args.src_lang
if args.tgt_lang is not None:
_lowerCAmelCase : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = eval_data_dir(
args.data_dir ,_lowerCamelCase ,args.model_name ,type_path=args.type_path ,bs=args.bs ,fpaa=args.fpaa ,task=args.task ,local_rank=args.local_rank ,n_obs=args.n_obs ,max_source_length=args.max_source_length ,num_return_sequences=args.num_return_sequences ,prefix=args.prefix ,dataset_kwargs=_lowerCamelCase ,**_lowerCamelCase ,)
if args.local_rank <= 0:
_lowerCAmelCase : Optional[int] = Path(args.save_dir )
save_dir.mkdir(exist_ok=_lowerCamelCase )
_lowerCAmelCase : List[str] = gather_results_from_each_node(_lowerCamelCase ,_lowerCamelCase ,args.sync_timeout )
_lowerCAmelCase : List[Any] = combine_partial_results(_lowerCamelCase )
if args.num_return_sequences > 1:
_lowerCAmelCase : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
save_json(_lowerCamelCase ,_lowerCamelCase )
return
_lowerCAmelCase : Union[str, Any] = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : List[Any] = [x.rstrip() for x in f.readlines()][: len(_lowerCamelCase )]
# Calculate metrics, save metrics, and save _generations.txt
_lowerCAmelCase : List[str] = """translation""" in args.task
_lowerCAmelCase : List[Any] = calculate_bleu if calc_bleu else calculate_rouge
_lowerCAmelCase : str = """bleu""" if calc_bleu else """rouge"""
_lowerCAmelCase : Dict = score_fn(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Any = len(_lowerCamelCase )
_lowerCAmelCase : int = time.time() - start_time
_lowerCAmelCase : Optional[Any] = round(runtime / metrics["""n_obs"""] ,4 )
_lowerCAmelCase : Any = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_lowerCAmelCase : Union[str, Any] = save_dir.joinpath(f"{args.type_path}_{metric_name}.json" )
save_json(_lowerCamelCase ,_lowerCamelCase ,indent=_lowerCamelCase )
print(_lowerCamelCase )
write_txt_file(_lowerCamelCase ,save_dir.joinpath(f"{args.type_path}_generations.txt" ) )
if args.debug:
write_txt_file(_lowerCamelCase ,save_dir.joinpath(f"{args.type_path}.target" ) )
else:
shutil.rmtree(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> List:
_lowerCAmelCase : Union[str, Any] = []
for partial_result in partial_results:
records.extend(_lowerCamelCase )
_lowerCAmelCase : Any = sorted(_lowerCamelCase ,key=lambda _lowerCamelCase : x["id"] )
_lowerCAmelCase : str = [x["""pred"""] for x in records]
return preds
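def _example_combine(partial_results=None):
    # Illustrative only (made-up records): predictions from every rank are
    # flattened, re-sorted by example id, and reduced to the prediction list,
    # mirroring the function above.
    partial_results = partial_results or [[{"id": 1, "pred": "b"}], [{"id": 0, "pred": "a"}]]
    records = [record for partial in partial_results for record in partial]
    records.sort(key=lambda x: x["id"])
    return [x["pred"] for x in records]  # -> ["a", "b"]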
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[str] ,_lowerCamelCase : List[str] ) -> List[Dict[str, List]]:
    # Wait until every rank has written its .json shard
_lowerCAmelCase : Optional[int] = time.time()
logger.info("""waiting for all nodes to finish""" )
_lowerCAmelCase : str = None
while (time.time() - start_wait) < timeout:
_lowerCAmelCase : List[Any] = list(save_dir.glob("""rank_*.json""" ) )
if len(_lowerCamelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
_lowerCAmelCase : Any = lmap(_lowerCamelCase ,_lowerCamelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("""Rank 0 gave up on waiting for other processes""" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Dict ,_lowerCamelCase : Union[str, Any] ) -> Tuple:
# Initialise PyTorch model
_lowerCAmelCase : str = RemBertConfig.from_json_file(_lowerCamelCase )
print("""Building PyTorch model from configuration: {}""".format(str(_lowerCamelCase ) ) )
_lowerCAmelCase : Optional[Any] = RemBertModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(_lowerCamelCase ) )
torch.save(model.state_dict() ,_lowerCamelCase )
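def _example_invocation():
    # Illustrative only (hypothetical paths, and assumes the conversion entry
    # point above under its conventional name): the same conversion can be
    # driven from Python instead of the CLI defined below.
    convert_rembert_tf_checkpoint_to_pytorch(
        "/tmp/rembert/model.ckpt", "/tmp/rembert/config.json", "/tmp/rembert/pytorch_model.bin"
    )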
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a : int = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
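def _example_mem_shapes(mem_len: int = 30, batch_size: int = 13, hidden_size: int = 32, n_layer: int = 2):
    # Illustrative only: the shape assertions above expect one memory tensor
    # per layer, each of shape (mem_len, batch_size, hidden_size).
    return [(mem_len, batch_size, hidden_size)] * n_layer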
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : int = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = "sew-d"
def __init__( self , a__=32 , a__=768 , a__=12 , a__=12 , a__=3072 , a__=2 , a__=512 , a__=256 , a__=True , a__=True , a__=("p2c", "c2p") , a__="layer_norm" , a__="gelu_python" , a__=0.1 , a__=0.1 , a__=0.1 , a__=0.0 , a__=0.1 , a__=0.0_2 , a__=1e-7 , a__=1e-5 , a__="group" , a__="gelu" , a__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , a__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a__=False , a__=128 , a__=16 , a__=True , a__=0.0_5 , a__=10 , a__=2 , a__=0.0 , a__=10 , a__=0 , a__="mean" , a__=False , a__=False , a__=256 , a__=0 , a__=1 , a__=2 , **a__ , ):
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[Any] = feat_extract_norm
_lowerCAmelCase : Any = feat_extract_activation
_lowerCAmelCase : Optional[int] = list(a__ )
_lowerCAmelCase : Dict = list(a__ )
_lowerCAmelCase : Tuple = list(a__ )
_lowerCAmelCase : int = conv_bias
_lowerCAmelCase : Any = num_conv_pos_embeddings
_lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCAmelCase : int = len(self.conv_dim )
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : int = squeeze_factor
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Optional[Any] = position_buckets
_lowerCAmelCase : List[str] = share_att_key
_lowerCAmelCase : Optional[Any] = relative_attention
_lowerCAmelCase : str = norm_rel_ebd
_lowerCAmelCase : Any = list(a__ )
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : List[str] = attention_dropout
_lowerCAmelCase : Union[str, Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : int = final_dropout
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : List[Any] = feature_layer_norm_eps
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : List[str] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : List[Any] = apply_spec_augment
_lowerCAmelCase : List[Any] = mask_time_prob
_lowerCAmelCase : Any = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : Optional[int] = mask_feature_prob
_lowerCAmelCase : List[str] = mask_feature_length
_lowerCAmelCase : Any = mask_feature_min_masks
# ctc loss
_lowerCAmelCase : Tuple = ctc_loss_reduction
_lowerCAmelCase : List[str] = ctc_zero_infinity
# sequence classification
_lowerCAmelCase : Union[str, Any] = use_weighted_layer_sum
_lowerCAmelCase : Optional[int] = classifier_proj_size
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
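# Editor's note (sketch): the property above multiplies the convolutional
# strides together, i.e. the overall downsampling factor of the feature
# encoder; for the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
# this evaluates to 5 * 2**6 = 320 input samples per output frame.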
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSw3Tokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = (PNDMScheduler,)
_UpperCamelCase : Optional[Any] = (("num_inference_steps", 50),)
def __A ( self , **a__ ):
_lowerCAmelCase : Optional[int] = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
}
config.update(**a__ )
return config
def __A ( self , a__=0 , **a__ ):
_lowerCAmelCase : str = dict(self.forward_default_kwargs )
_lowerCAmelCase : str = kwargs.pop("""num_inference_steps""" , a__ )
_lowerCAmelCase : Tuple = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config(**a__ )
_lowerCAmelCase : Any = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
_lowerCAmelCase : Any = scheduler_class.from_pretrained(a__ )
new_scheduler.set_timesteps(a__ )
# copy over dummy past residuals
_lowerCAmelCase : str = dummy_past_residuals[:]
_lowerCAmelCase : Optional[int] = scheduler.step_prk(a__ , a__ , a__ , **a__ ).prev_sample
_lowerCAmelCase : Any = new_scheduler.step_prk(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase : Tuple = scheduler.step_plms(a__ , a__ , a__ , **a__ ).prev_sample
_lowerCAmelCase : Optional[int] = new_scheduler.step_plms(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __A ( self ):
pass
def __A ( self , a__=0 , **a__ ):
_lowerCAmelCase : List[str] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop("""num_inference_steps""" , a__ )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : str = 0.1 * sample
_lowerCAmelCase : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a__ )
_lowerCAmelCase : List[Any] = scheduler_class.from_pretrained(a__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a__ )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : Optional[int] = dummy_past_residuals[:]
_lowerCAmelCase : int = scheduler.step_prk(a__ , a__ , a__ , **a__ ).prev_sample
_lowerCAmelCase : int = new_scheduler.step_prk(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase : List[str] = scheduler.step_plms(a__ , a__ , a__ , **a__ ).prev_sample
_lowerCAmelCase : Optional[Any] = new_scheduler.step_plms(a__ , a__ , a__ , **a__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __A ( self , **a__ ):
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**a__ )
_lowerCAmelCase : Tuple = scheduler_class(**a__ )
_lowerCAmelCase : List[Any] = 10
_lowerCAmelCase : Optional[int] = self.dummy_model()
_lowerCAmelCase : int = self.dummy_sample_deter
scheduler.set_timesteps(a__ )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowerCAmelCase : int = model(a__ , a__ )
_lowerCAmelCase : List[Any] = scheduler.step_prk(a__ , a__ , a__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowerCAmelCase : Dict = model(a__ , a__ )
_lowerCAmelCase : Tuple = scheduler.step_plms(a__ , a__ , a__ ).prev_sample
return sample
def __A ( self ):
_lowerCAmelCase : Dict = dict(self.forward_default_kwargs )
_lowerCAmelCase : Optional[Any] = kwargs.pop("""num_inference_steps""" , a__ )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Dict = scheduler_class(**a__ )
_lowerCAmelCase : Optional[int] = self.dummy_sample
_lowerCAmelCase : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(a__ , """set_timesteps""" ):
scheduler.set_timesteps(a__ )
elif num_inference_steps is not None and not hasattr(a__ , """set_timesteps""" ):
_lowerCAmelCase : Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
_lowerCAmelCase : Dict = dummy_past_residuals[:]
_lowerCAmelCase : Union[str, Any] = scheduler.step_prk(a__ , 0 , a__ , **a__ ).prev_sample
_lowerCAmelCase : Any = scheduler.step_prk(a__ , 1 , a__ , **a__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCAmelCase : Tuple = scheduler.step_plms(a__ , 0 , a__ , **a__ ).prev_sample
_lowerCAmelCase : List[Any] = scheduler.step_plms(a__ , 1 , a__ , **a__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __A ( self ):
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def __A ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a__ )
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(steps_offset=1 )
_lowerCAmelCase : Optional[Any] = scheduler_class(**a__ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def __A ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=a__ , beta_end=a__ )
def __A ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a__ )
def __A ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def __A ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=a__ )
def __A ( self ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=a__ )
def __A ( self ):
# an earlier version of set_timesteps() caused an indexing error on the alphas when the number of inference steps was a power of 3
_lowerCAmelCase : Dict = 27
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCAmelCase : int = scheduler_class(**a__ )
scheduler.set_timesteps(a__ )
# before the power-of-3 fix this would error on the first step, so running two steps is enough
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowerCAmelCase : Optional[Any] = scheduler.step_prk(a__ , a__ , a__ ).prev_sample
def __A ( self ):
with self.assertRaises(a__ ):
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config()
_lowerCAmelCase : Optional[Any] = scheduler_class(**a__ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __A ( self ):
_lowerCAmelCase : Dict = self.full_loop()
_lowerCAmelCase : Union[str, Any] = torch.sum(torch.abs(a__ ) )
_lowerCAmelCase : int = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __A ( self ):
_lowerCAmelCase : List[str] = self.full_loop(prediction_type="""v_prediction""" )
_lowerCAmelCase : Optional[int] = torch.sum(torch.abs(a__ ) )
_lowerCAmelCase : int = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __A ( self ):
# We specify a different beta_start so that the first alpha is 0.99
_lowerCAmelCase : str = self.full_loop(set_alpha_to_one=a__ , beta_start=0.0_1 )
_lowerCAmelCase : Dict = torch.sum(torch.abs(a__ ) )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __A ( self ):
# We specify a different beta_start so that the first alpha is 0.99
_lowerCAmelCase : Dict = self.full_loop(set_alpha_to_one=a__ , beta_start=0.0_1 )
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(a__ ) )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
_a : Optional[Any] = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_a : List[str] = concatenate_datasets
_a : Optional[Any] = DownloadConfig
_a : Tuple = DownloadManager
_a : List[str] = DownloadMode
_a : Tuple = DownloadConfig
_a : Union[str, Any] = DownloadMode
_a : Optional[int] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 0 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
_a : Dict = 299_792_458
# Symbols
_a : Union[str, Any] = symbols('ct x y z')
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ) -> float:
if velocity > c:
raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("""Speed must be greater than or equal to 1!""" )
return velocity / c
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ) -> float:
return 1 / sqrt(1 - beta(_lowerCamelCase ) ** 2 )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ) -> np.ndarray:
return np.array(
[
[gamma(_lowerCamelCase ), -gamma(_lowerCamelCase ) * beta(_lowerCamelCase ), 0, 0],
[-gamma(_lowerCamelCase ) * beta(_lowerCamelCase ), gamma(_lowerCamelCase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : np.ndarray | None = None ) -> np.ndarray:
# Fall back to the symbolic four-vector when no concrete event is supplied
if event is None:
_lowerCAmelCase : Tuple = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
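# NB: the multiplication above happens in place, so the caller's array is
# modified as a side effect -- pass a copy if the original event must survive.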
return transformation_matrix(_lowerCamelCase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
_a : Union[str, Any] = transform(29_979_245)
print('Example of four vector: ')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
_a : Optional[Any] = {ct: c, x: 1, y: 1, z: 1}
_a : Tuple = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[str] = np.max(_lowerCamelCase ,axis=-1 ,keepdims=_lowerCamelCase )
_lowerCAmelCase : List[str] = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 ,keepdims=_lowerCamelCase )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self , **a__ ):
_lowerCAmelCase : int = {}
if "second_text" in kwargs:
_lowerCAmelCase : Optional[int] = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def __A ( self , a__ , a__=None ):
return self.tokenizer(a__ , text_pair=a__ , return_tensors=self.framework )
def __A ( self , a__ ):
return self.model(**a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Any = model_outputs.logits[0].numpy()
_lowerCAmelCase : List[Any] = softmax(a__ )
_lowerCAmelCase : List[str] = np.argmax(a__ )
_lowerCAmelCase : Optional[int] = self.model.config.idalabel[best_class]
_lowerCAmelCase : Optional[Any] = probabilities[best_class].item()
_lowerCAmelCase : Any = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 720 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
_lowerCAmelCase : List[str] = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,_lowerCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
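# Editor's sketch of the same totient sieve on a tiny limit: phi(2..8) is
# 1, 2, 2, 4, 2, 6, 4, so the count of reduced proper fractions with
# denominator <= 8 is 21.
def _totient_sum_sketch(limit: int = 8) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: apply phi(j) -= phi(j) // i to its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
assert _totient_sum_sketch(8) == 21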
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : list[str] ) -> str:
_lowerCAmelCase : List[str] = """"""
for word_or_phrase in separated:
if not isinstance(_lowerCamelCase ,_lowerCamelCase ):
raise Exception("""join() accepts only strings to be joined""" )
joined += word_or_phrase + separator
return joined.strip(_lowerCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
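# Example (sketch): joining with ", " behaves like str.join for typical
# inputs, e.g. ["apple", "banana", "cherry"] -> "apple, banana, cherry".
# Note the final strip(separator) removes *characters* of the separator from
# both ends, which can differ from str.join for unusual separators.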
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
def UpperCAmelCase__ ( )->int:
return [
a * b * (1_0_0_0 - a - b)
for a in range(1 , 9_9_9 )
for b in range(_SCREAMING_SNAKE_CASE , 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 664 |
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : str )->list[int]:
_lowerCAmelCase = int(_SCREAMING_SNAKE_CASE )
# Initialize Result
_lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_SCREAMING_SNAKE_CASE ):
# Find denominations
while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ):
total_value -= int(_SCREAMING_SNAKE_CASE )
answer.append(_SCREAMING_SNAKE_CASE ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase_ = []
UpperCAmelCase_ = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
UpperCAmelCase_ = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCAmelCase_ = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
UpperCAmelCase_ = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(F"""Following is minimal change for {value}: """)
UpperCAmelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
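# Non-interactive sketch of the greedy routine above (the greedy choice is
# optimal for canonical coin systems such as these denominations):
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
# -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]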
| 664 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = OpenAIGPTTokenizer
SCREAMING_SNAKE_CASE__ = OpenAIGPTTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
_lowerCAmelCase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(_lowerCAmelCase ) )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
return "lower newer", "lower newer"
def __lowerCAmelCase ( self ):
_lowerCAmelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_lowerCAmelCase = '''lower'''
_lowerCAmelCase = ['''low''', '''er</w>''']
_lowerCAmelCase = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = tokens + ['''<unk>''']
_lowerCAmelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# Simple input
_lowerCAmelCase = '''This is a simple input'''
_lowerCAmelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
_lowerCAmelCase = ('''This is a simple input''', '''This is a pair''')
_lowerCAmelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding='''max_length''' , )
def __lowerCAmelCase ( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase ( snake_case_ ):
pass
| 664 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict:
# Initialise PyTorch model
_lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
_lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 664 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "Hello world! cécé herlolip"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]:
_lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
roberta.eval() # disable dropout
_lowerCAmelCase = roberta.model.encoder.sentence_encoder
_lowerCAmelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase = model.roberta.encoder.layer[i]
_lowerCAmelCase = roberta_sent_encoder.layers[i]
_lowerCAmelCase = layer.attention
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase = roberta_layer.final_layer_norm.weight
_lowerCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# output
_lowerCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0]
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) )
else:
_lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
_lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 664 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = '''MCTCTFeatureExtractor'''
SCREAMING_SNAKE_CASE__ = '''AutoTokenizer'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = self.feature_extractor
_lowerCAmelCase = False
def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCAmelCase , **_lowerCAmelCase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_lowerCAmelCase = kwargs.pop('''raw_speech''' )
else:
_lowerCAmelCase = kwargs.pop('''audio''' , _lowerCAmelCase )
_lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase )
_lowerCAmelCase = kwargs.pop('''text''' , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_lowerCAmelCase = args[0]
_lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_lowerCAmelCase = self.feature_extractor(_lowerCAmelCase , *_lowerCAmelCase , sampling_rate=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None:
_lowerCAmelCase = self.tokenizer(_lowerCAmelCase , **_lowerCAmelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowerCAmelCase = encodings['''input_ids''']
return inputs
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowerCAmelCase , **_lowerCAmelCase )
_lowerCAmelCase = kwargs.pop('''input_features''' , _lowerCAmelCase )
_lowerCAmelCase = kwargs.pop('''labels''' , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
_lowerCAmelCase = args[0]
_lowerCAmelCase = args[1:]
if input_features is not None:
_lowerCAmelCase = self.feature_extractor.pad(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
if labels is not None:
_lowerCAmelCase = self.tokenizer.pad(_lowerCAmelCase , **_lowerCAmelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowerCAmelCase = labels['''input_ids''']
return input_features
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@contextmanager
def __lowerCAmelCase ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
_lowerCAmelCase = True
_lowerCAmelCase = self.tokenizer
yield
_lowerCAmelCase = self.feature_extractor
_lowerCAmelCase = False
| 664 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_lowerCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = i / num_diffusion_timesteps
_lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
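# Editor's note: each beta is 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T),
# clipped at max_beta, so the cumulative product of the alphas tracks the
# chosen alpha_bar curve (squared cosine or exponential).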
class UpperCAmelCase ( snake_case_ ,snake_case_ ):
SCREAMING_SNAKE_CASE__ = 1
@register_to_config
def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ):
if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None:
_lowerCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
_lowerCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase )
else:
raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_lowerCAmelCase = 1.0 - self.betas
_lowerCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_lowerCAmelCase = 1.0
# setable values
_lowerCAmelCase = None
_lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
return sample
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
_lowerCAmelCase = num_inference_steps
_lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa )
_lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
self.timesteps += self.config.steps_offset
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ):
# 1. get previous step value (=t+1)
_lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_lowerCAmelCase = self.alphas_cumprod[timestep]
_lowerCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_lowerCAmelCase = model_output
elif self.config.prediction_type == "sample":
_lowerCAmelCase = model_output
_lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_lowerCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
def __len__( self ):
return self.config.num_train_timesteps
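# Minimal usage sketch (editor's assumption: this mirrors diffusers'
# DDIMInverseScheduler, which runs the DDIM update in reverse to add noise):
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     sample = scheduler.step(model_output, t, sample).prev_sample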
| 664 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
_lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
_lowerCAmelCase = DisjunctiveConstraint(_lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCAmelCase ) )
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self ):
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
_lowerCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint(_lowerCAmelCase ) # fails here
def __lowerCAmelCase ( self ):
_lowerCAmelCase = [[1, 2, 3], [1, 2, 4]]
_lowerCAmelCase = DisjunctiveConstraint(_lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(1 )
_lowerCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(2 )
_lowerCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(3 )
_lowerCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_lowerCAmelCase = DisjunctiveConstraint(_lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
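# Editor's summary: the tests above pin down the contract -- update() reports
# (stepped, completed, reset), either branch of the disjunction can be
# advanced token by token, the shorter branch completes early, and reset()
# restores the full remaining budget.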
| 664 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 1 |
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->str:
_lowerCAmelCase = int(_SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase , _lowerCAmelCase = divmod(_SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(_SCREAMING_SNAKE_CASE ) + str(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->str:
_lowerCAmelCase = str(_SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('''No input value was provided''' )
_lowerCAmelCase = '''-''' if number.startswith('''-''' ) else ''''''
_lowerCAmelCase = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f'''{negative}0b{binary_recursive(int(_SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
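# Example (sketch): the entry point maps "-17" -> "-0b10001"; the recursive
# helper peels off the least-significant bit with divmod(n, 2) and prepends
# the bits of the quotient.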
| 664 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
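# Usage sketch (checkpoint name illustrative; any CLAP checkpoint with a
# Roberta tokenizer should behave the same way):
#
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=audio_array,
#                      sampling_rate=48000, return_tensors="pt")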
| 664 | 1 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
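# Minimal usage sketch for the collators re-exported above (model name
# illustrative):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tokenizer=tokenizer)
#   batch = collator([tokenizer("short"), tokenizer("a much longer sentence")])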
| 664 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
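# Complexity note: with k = int(max - min) + 1 buckets this runs in O(n + k)
# plus the per-bucket sorts. A quick extra check on floats (hand-verified):
#   bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2]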
| 664 | 1 |
def solution(n: int = 1000) -> int:
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
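# Worked example: the first Fibonacci number with three digits is F(12) = 144,
# so solution(3) returns 12 (the default n=1000 reproduces Project Euler 25).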
| 664 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase__ ( )->Any:
_lowerCAmelCase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase = get_sagemaker_input()
else:
_lowerCAmelCase = get_cluster_input()
return config
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str:
if subparsers is not None:
_lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = get_user_input()
if args.config_file is not None:
_lowerCAmelCase = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def UpperCAmelCase__ ( )->List[Any]:
_lowerCAmelCase = config_command_parser()
_lowerCAmelCase = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
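# Typical invocation (sketch): the same prompts run via the packaged CLI,
#   accelerate config --config_file ./my_config.yaml
# which writes the answers to the default cache location when no path is given.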
| 664 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 664 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
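# Usage sketch (assumes a `datasets.Dataset` with "content", "repo_name" and
# "path" columns; dataset name illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)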
| 664 | 1 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 664 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator`, so we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 664 | 1 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    # Field names follow `datasets.DownloadConfig`, which this configuration
    # dataclass mirrors.
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 664 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
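# Note on the derivative: sigmoid'(z) = s * (1 - s) with s = sigmoid(z), so the
# helper above expects an already-activated value. Worked check:
#   sigmoid(0.0) = 0.5 and sigmoid_derivative(0.5) = 0.25.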
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 664 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : Tuple )->Tuple:
requires_backends(_SCREAMING_SNAKE_CASE , ['''torch'''] )
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Optional[int] , **_SCREAMING_SNAKE_CASE : Tuple )->List[Any]:
requires_backends(_SCREAMING_SNAKE_CASE , ['''torch'''] )
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Any )->Tuple:
requires_backends(_SCREAMING_SNAKE_CASE , ['''torch'''] )
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any )->Dict:
requires_backends(_SCREAMING_SNAKE_CASE , ['''torch'''] )
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[str] )->Dict:
requires_backends(_SCREAMING_SNAKE_CASE , ['''torch'''] )
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : Optional[int] )->Tuple:
requires_backends(_SCREAMING_SNAKE_CASE , ['''torch'''] )
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[Any] )->Optional[Any]:
requires_backends(_SCREAMING_SNAKE_CASE , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase ( metaclass=snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''torch''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def __lowerCAmelCase ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['''torch'''] )
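# Pattern note: every placeholder above is intentionally identical; each one
# raises an informative ImportError through `requires_backends` as soon as it
# is instantiated without torch installed. A hypothetical addition (class name
# illustrative only) follows the same shape:
#
#   class MyTorchOnlyModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])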
| 664 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
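# Minimal sketch (not part of the test file) of how these criteria plug into
# generation; `model` and `input_ids` are placeholders for a real setup:
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#   outputs = model.generate(input_ids, stopping_criteria=criteria)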
| 664 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Release memory by clearing `objects`, then run garbage collection and empty the device cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if `exception` looks like an out-of-memory error worth retrying with a smaller batch."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size whenever an OOM-like error is raised.

    `function` must take the batch size as its first argument; the decorator injects it.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
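# Usage sketch for the decorator above. The wrapped function must accept the
# batch size as its first argument, which the decorator injects and halves on
# OOM-like failures; the fake OOM below matches `should_reduce_batch_size`.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=8)
    def _demo(batch_size):
        if batch_size > 2:  # pretend anything above 2 exhausts memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    print(_demo())  # prints 2 after retrying 8 -> 4 -> 2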
| 664 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
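# The shim keeps old imports working while nudging users to the new name
# (illustrative):
#
#   feature_extractor = DPTFeatureExtractor()          # emits a FutureWarning
#   assert isinstance(feature_extractor, DPTImageProcessor)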
| 664 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
def __lowerCAmelCase ( self ):
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = self.prepare_config_and_inputs()
_lowerCAmelCase = True
_lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")

        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")

        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")

        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265

        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
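# Illustrative use mirroring the integration tests above (a sketch; requires
# network access to download the checkpoint):
#
#   model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#   hidden_states = model(torch.arange(256).unsqueeze(0))[0]  # shape (1, 256, 768)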
| 664 | 1 |
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively, with memoization via lru_cache.

    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
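# Quick illustration beyond the doctests: thanks to lru_cache, a later call
# reuses earlier results instead of recursing all the way down.
#
#   factorial(5)   # 120, fills the cache up to 5!
#   factorial(10)  # 3628800, recurses only from 10 down to the cached 5!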
| 664 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
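# Illustrative end-to-end use of the re-exported pipeline (a sketch; the hub id
# "openai/shap-e" and the call signature reflect common diffusers usage and are
# assumptions, not taken from this file):
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#   images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images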
| 664 | 1 |
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
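# Round-trip sketch outside pytest (illustrative; "data.db" is a placeholder
# path and `Dataset.to_sql`/`Dataset.from_sql` wrap the reader/writer tested
# above):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#   ds.to_sql("dataset", "sqlite:///data.db")
#   ds_back = Dataset.from_sql("dataset", "sqlite:///data.db")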
| 664 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
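# Shape sketch for the processor under test (illustrative; assumes vision
# dependencies are installed): a 10-frame video of HWC uint8 frames comes back
# as a 5D batch (batch, frames, channels, height, width).
#
#   processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   video = [np.random.randint(0, 255, (24, 24, 3), dtype=np.uint8) for _ in range(10)]
#   processor(video, return_tensors="pt").pixel_values.shape  # (1, 10, 3, 18, 18)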
| 664 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase_ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
UpperCAmelCase_ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 664 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcountergood_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcountergood_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
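# Worked micro-example for compute_sari, mirroring the sample in the module
# docstring above (illustrative):
#
#   compute_sari(
#       sources=["About 95 species are currently accepted ."],
#       predictions=["About 95 you now get in ."],
#       references=[["About 95 species are currently known ."]],
#   )  # ~21.81, the 'sari' value shown in the docstring example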
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 664 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self ):
_lowerCAmelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_lowerCAmelCase )
_lowerCAmelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_lowerCAmelCase , revision='''3dc6de3''' , )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = '''What is the invoice number?'''
_lowerCAmelCase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
_lowerCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
_lowerCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
_lowerCAmelCase = list(zip(*apply_tesseract(load_image(_lowerCAmelCase ) , _lowerCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
_lowerCAmelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{'''score''': 0.4_251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCAmelCase ( self ):
_lowerCAmelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_lowerCAmelCase )
_lowerCAmelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_lowerCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = '''What is the invoice number?'''
_lowerCAmelCase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
_lowerCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
_lowerCAmelCase = list(zip(*apply_tesseract(load_image(_lowerCAmelCase ) , _lowerCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
_lowerCAmelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_lowerCAmelCase , decimals=4 ) , [
{'''score''': 0.9_999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __lowerCAmelCase ( self ):
_lowerCAmelCase = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
_lowerCAmelCase = INVOICE_URL
_lowerCAmelCase = '''What is the invoice number?'''
_lowerCAmelCase = dqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(_lowerCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __lowerCAmelCase ( self ):
pass
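

if __name__ == "__main__":
    # A minimal usage sketch of the pipeline exercised by the tests above, not
    # part of the test suite. Model name and revision are taken from the test
    # cases; running this requires the optional OCR/vision dependencies
    # (Pillow, pytesseract) plus a model download.
    dqa_pipeline = pipeline(
        "document-question-answering", model="impira/layoutlm-document-qa", revision="3dc6de3"
    )
    print(dqa_pipeline(image=INVOICE_URL, question="What is the invoice number?", top_k=2))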
| 664 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
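
# A short sketch of what the lazy module above buys you (assuming the torch and
# vision extras are installed): names listed in `_import_structure` are only
# imported on first attribute access, so e.g.
#
#     from transformers import DeiTImageProcessor, DeiTModel
#
# does not pay the import cost of the TensorFlow submodule.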
| 664 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
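
# Typical ways to launch the script above (a sketch; exact behavior depends on
# your `accelerate config`):
#
#     python memory.py                                   # single CPU or GPU
#     accelerate launch memory.py                        # honors `accelerate config`
#     accelerate launch --mixed_precision fp16 memory.py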
| 664 |
def compute_ap(l):  # noqa: E741
    # Print all articulation points of an undirected graph given as an adjacency list.
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
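
# For the adjacency list above, `compute_ap(data)` prints one articulation
# point per line: 2, 3 and 5 (removing 2 splits off the 0-1 pair, removing 3
# isolates vertex 4, and removing 5 detaches the 6-7-8 cycle).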
| 664 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1_000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 664 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 664 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
UpperCAmelCase_ = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fpaa=False, )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fpaa=False, )
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fpaa=True, )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fpaa=True, )
    def do_checks(self, output_dir):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fpaa=True, quality_checks=True):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fpaa=fpaa, )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fpaa=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 664 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = DiTPipeline
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1_000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''cpu'''
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
_lowerCAmelCase = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 664 | 1 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    # Find all the valid positions a knight can move to from the current position.
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    # Check if the board (matrix) has been completely filled with non-zero values.
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    # Helper function to solve the knight tour problem by backtracking.
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    # Find a solution for the knight tour problem on a board of size n,
    # raising ValueError if no tour exists for the given size.
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
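
    # A quick usage sketch: a 5x5 board admits an open knight's tour; each cell
    # of the returned board holds the move number at which it was visited.
    for row in open_knight_tour(5):
        print(row)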
| 664 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        # Return a dict of user information scraped from the profile page.
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , _lowerCAmelCase ):
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
    def check(self):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self):
        # Sequences that are too long are split by chunks of max_model_input_size.
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"Splitting {sum(idxs)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        # Too short sequences are simply removed. This could be tuned.
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        # Remove sequences with a (too) high level of unknown tokens.
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        # Do the padding and transform into torch.tensor.
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
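
# A minimal usage sketch (the `params` object is hypothetical here; in the
# distillation scripts it is an argparse namespace providing `mlm`,
# `max_model_input_size`, `special_tok_ids` and `is_master`):
#
#     from torch.utils.data import DataLoader
#
#     dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#     loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)
#     for token_ids, lengths in loader:
#         ...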
| 664 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array

    return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase_ = []
UpperCAmelCase_ = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
UpperCAmelCase_ = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCAmelCase_ = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
UpperCAmelCase_ = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(F"""Following is minimal change for {value}: """)
UpperCAmelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
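
    # Worked example of the greedy strategy with the default Indian
    # denominations: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
    # returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2].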
| 664 | 1 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1_0_0_0),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
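
    # Usage sketch: convert 4 cubic meters to litres (prints 4000).
    print(volume_conversion(4, "cubicmeter", "litre"))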
| 664 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
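
# Example invocation (a sketch; the paths are placeholders for your own
# checkpoint, config and output locations):
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert_base/model.ckpt-best \
#         --albert_config_file ./albert_base/albert_config.json \
#         --pytorch_dump_path ./albert_base_pytorch_model.bin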
| 664 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE__ = '''AutoImageProcessor'''
SCREAMING_SNAKE_CASE__ = '''AutoTokenizer'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = self.image_processor
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
_lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if images is not None:
_lowerCAmelCase = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __lowerCAmelCase ( self ):
return ["input_ids", "attention_mask", "pixel_values"]
| 664 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    # Copy/paste/tweak roberta's weights to our BERT structure.
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase = model.roberta.encoder.layer[i]
_lowerCAmelCase = roberta_sent_encoder.layers[i]
_lowerCAmelCase = layer.attention
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase = roberta_layer.final_layer_norm.weight
_lowerCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# output
_lowerCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0]
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) )
else:
_lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
_lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
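# --- Added illustration (hedged sketch, not part of the original script): the
# output-parity check used above, factored into a standalone helper on toy
# tensors. Only `torch`, already imported above, is required.
def _outputs_match(our_output , their_output , atol=1e-3 ):
    # Same pattern as the conversion check: report the max absolute difference,
    # then compare element-wise within the tolerance.
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' )
    return torch.allclose(our_output , their_output , atol=atol )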
| 664 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def train_command_factory( _SCREAMING_SNAKE_CASE : Namespace )->Optional[int]:
return TrainCommand(_SCREAMING_SNAKE_CASE )
class TrainCommand ( snake_case_ ):
@staticmethod
def __lowerCAmelCase ( _lowerCAmelCase ):
_lowerCAmelCase = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=_lowerCAmelCase , required=_lowerCAmelCase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowerCAmelCase , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=_lowerCAmelCase , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=_lowerCAmelCase , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=_lowerCAmelCase , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=_lowerCAmelCase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowerCAmelCase , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=_lowerCAmelCase , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=_lowerCAmelCase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=_lowerCAmelCase , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=_lowerCAmelCase , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=_lowerCAmelCase , default=3E-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=_lowerCAmelCase , default=1E-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=train_command_factory )
def __init__( self , _lowerCAmelCase ):
_lowerCAmelCase = logging.get_logger('''transformers-cli/training''' )
_lowerCAmelCase = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowerCAmelCase )
_lowerCAmelCase = args.output
_lowerCAmelCase = args.column_label
_lowerCAmelCase = args.column_text
_lowerCAmelCase = args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase = None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase = args.validation_split
_lowerCAmelCase = args.train_batch_size
_lowerCAmelCase = args.valid_batch_size
_lowerCAmelCase = args.learning_rate
_lowerCAmelCase = args.adam_epsilon
def __lowerCAmelCase ( self ):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __lowerCAmelCase ( self ):
raise NotImplementedError
def __lowerCAmelCase ( self ):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
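# --- Added usage sketch (hedged): how this command is typically wired into a CLI
# entry point. The parser name and the registration-method name are assumptions;
# the registration staticmethod itself is the one defined (obfuscated as
# __lowerCAmelCase) at the top of TrainCommand:
#
#   parser = ArgumentParser("transformers-cli")
#   subparsers = parser.add_subparsers()
#   TrainCommand.register_subcommand(subparsers)     # name assumed
#   args = parser.parse_args(["train", "--train_data", "data.csv"])
#   args.func(args).run()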
| 664 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
def betas_for_alpha_bar( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_lowerCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = i / num_diffusion_timesteps
_lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class UpperCAmelCase ( snake_case_ ,snake_case_ ):
SCREAMING_SNAKE_CASE__ = 1
@register_to_config
def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ):
if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None:
_lowerCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
_lowerCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
_lowerCAmelCase = 1.0 - self.betas
_lowerCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_lowerCAmelCase = 1.0
# setable values
_lowerCAmelCase = None
_lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
return sample
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
_lowerCAmelCase = num_inference_steps
_lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa )
_lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
self.timesteps += self.config.steps_offset
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ):
# 1. get previous step value (=t+1)
_lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_lowerCAmelCase = self.alphas_cumprod[timestep]
_lowerCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_lowerCAmelCase = model_output
elif self.config.prediction_type == "sample":
_lowerCAmelCase = model_output
_lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_lowerCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
def __len__( self ):
return self.config.num_train_timesteps
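# --- Added numeric check (hedged illustration, not part of the scheduler): the
# "predicted x_0" identity used in step() for epsilon prediction,
#     x0 = (x_t - sqrt(1 - a_t) * eps) / sqrt(a_t),
# verified on toy tensors. Call _check_x0_identity() to run it.
def _check_x0_identity():
    alpha_prod_t = torch.tensor(0.7 )
    x0 = torch.randn(3 )
    eps = torch.randn(3 )
    sample = alpha_prod_t ** 0.5 * x0 + (1 - alpha_prod_t) ** 0.5 * eps  # forward process
    pred_original_sample = (sample - (1 - alpha_prod_t) ** 0.5 * eps) / alpha_prod_t ** 0.5
    assert torch.allclose(x0 , pred_original_sample , atol=1E-5 )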
| 664 | 1 |
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
1_0: "a",
1_1: "b",
1_2: "c",
1_3: "d",
1_4: "e",
1_5: "f",
}
def decimal_to_hexadecimal( decimal : float )->str:
    assert type(decimal ) in (int, float) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ''''''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 1_6 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '''0x''' + hexadecimal
    if negative:
        hexadecimal = '''-''' + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
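# --- Added quick check: for the integers below the converter agrees with
# Python's built-in hex(), including the sign handling.
assert decimal_to_hexadecimal(255 ) == hex(255 ) == "0xff"
assert decimal_to_hexadecimal(-26 ) == hex(-26 ) == "-0x1a"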
| 664 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
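# --- Added note (hedged sketch): _LazyModule defers the heavy torch import until
# an attribute is first accessed, via a module-level __getattr__ (PEP 562). A
# minimal stand-in for the same idea, with a hypothetical name->submodule map:
#
#   import importlib
#   def __getattr__(name):
#       submodule = _name_to_submodule[name]          # hypothetical mapping
#       return getattr(importlib.import_module("." + submodule, __name__), name)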
| 664 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_1_2,
"facebook/dpr-ctx_encoder-multiset-base": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 5_1_2,
"facebook/dpr-question_encoder-multiset-base": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 5_1_2,
"facebook/dpr-reader-multiset-base": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ = DPRContextEncoderTokenizer
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase_ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(snake_case_ )
class UpperCAmelCase :
def __call__( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ):
if titles is None and texts is None:
return super().__call__(
_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
elif titles is None or texts is None:
_lowerCAmelCase = titles if texts is None else texts
return super().__call__(
_lowerCAmelCase , _lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
_lowerCAmelCase = titles if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) else [titles]
_lowerCAmelCase = texts if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) else [texts]
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = questions if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) else [questions] * n_passages
assert len(_lowerCAmelCase ) == len(
_lowerCAmelCase ), F'''There should be as many titles than texts but got {len(_lowerCAmelCase )} titles and {len(_lowerCAmelCase )} texts.'''
_lowerCAmelCase = super().__call__(_lowerCAmelCase , _lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )['''input_ids''']
_lowerCAmelCase = super().__call__(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , padding=_lowerCAmelCase , truncation=_lowerCAmelCase )['''input_ids''']
_lowerCAmelCase = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowerCAmelCase , _lowerCAmelCase )
]
}
if return_attention_mask is not False:
_lowerCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase = attention_mask
return self.pad(_lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , return_tensors=_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 16 , _lowerCAmelCase = 64 , _lowerCAmelCase = 4 , ):
_lowerCAmelCase = reader_input['''input_ids''']
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = reader_output[:3]
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = sorted(range(_lowerCAmelCase ) , reverse=_lowerCAmelCase , key=relevance_logits.__getitem__ )
_lowerCAmelCase = []
for doc_id in sorted_docs:
_lowerCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase = len(_lowerCAmelCase )
_lowerCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowerCAmelCase , top_spans=_lowerCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowerCAmelCase , start_index=_lowerCAmelCase , end_index=_lowerCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = []
for start_index, start_score in enumerate(_lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x[1] , reverse=_lowerCAmelCase )
_lowerCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
_lowerCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(snake_case_ )
class UpperCAmelCase ( snake_case_ ,snake_case_ ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = READER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = READER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE__ = DPRReaderTokenizer
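# --- Added standalone illustration (hedged; not part of the original file): the
# span scoring used by the best-span selection above. A span's score is its start
# logit plus its end logit; candidates are sorted descending before overlap
# filtering.
def _demo_span_scores():
    start_logits = [0.1, 2.0, 0.3]
    end_logits = [0.0, 1.5, 2.5]
    max_answer_length = 2
    scores = []
    for start_index, start_score in enumerate(start_logits ):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
            scores.append(((start_index, start_index + answer_length), start_score + end_score) )
    scores = sorted(scores , key=lambda x: x[1] , reverse=True )
    assert scores[0] == ((1, 2), 4.5)  # start_logits[1] + end_logits[2]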
| 664 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = '''ClapFeatureExtractor'''
SCREAMING_SNAKE_CASE__ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
_lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if audios is not None:
_lowerCAmelCase = self.feature_extractor(
_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and audios is not None:
_lowerCAmelCase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
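# --- Added tiny demo: dict.fromkeys, as used in model_input_names above, merges
# the two name lists while preserving first-seen order and dropping duplicates.
_tokenizer_names = ["input_ids", "attention_mask"]
_feature_extractor_names = ["input_features", "attention_mask"]
assert list(dict.fromkeys(_tokenizer_names + _feature_extractor_names ) ) == [
    "input_ids",
    "attention_mask",
    "input_features",
]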
| 664 | 1 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = '''xlm-prophetnet'''
SCREAMING_SNAKE_CASE__ = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self , _lowerCAmelCase = 0.1 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 30_522 , _lowerCAmelCase = 1_024 , _lowerCAmelCase = 4_096 , _lowerCAmelCase = 12 , _lowerCAmelCase = 16 , _lowerCAmelCase = 4_096 , _lowerCAmelCase = 12 , _lowerCAmelCase = 16 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 512 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 32 , _lowerCAmelCase = 128 , _lowerCAmelCase = False , _lowerCAmelCase = 0.0 , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = 1 , _lowerCAmelCase = 2 , **_lowerCAmelCase , ):
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = num_encoder_layers
_lowerCAmelCase = num_encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = num_decoder_layers
_lowerCAmelCase = num_decoder_attention_heads
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = init_std # Normal(0, this parameter)
_lowerCAmelCase = activation_function
# parameters for xlmprophetnet
_lowerCAmelCase = ngram
_lowerCAmelCase = num_buckets
_lowerCAmelCase = relative_max_distance
_lowerCAmelCase = disable_ngram_loss
_lowerCAmelCase = eps
# 3 Types of Dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = dropout
_lowerCAmelCase = use_cache
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , add_cross_attention=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
@property
def __lowerCAmelCase ( self ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __lowerCAmelCase ( self , _lowerCAmelCase ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
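# --- Added usage sketch (hedged; assumes the class above is exported under its
# library name, XLMProphetNetConfig):
#
#   config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
#   config.num_hidden_layers        # -> 12, via the read-only property above
#   config.num_hidden_layers = 24   # -> NotImplementedError, per the setter above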
| 664 |
from __future__ import annotations
def bucket_sort( my_list : list )->list:
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets : list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
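# --- Added worked example: with [0.4, 1.2, 0.1], min=0.1 and max=1.2 give
# int(1.1) + 1 == 2 buckets; each value lands in bucket int(v - min).
assert bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2]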
| 664 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=30 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=10 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=0.6 , _lowerCAmelCase=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = mask_ratio
_lowerCAmelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFViTMAEModel(config=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , training=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = TFViTMAEForPreTraining(_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , training=_lowerCAmelCase )
# expected sequence length = num_patches
_lowerCAmelCase = (self.image_size // self.patch_size) ** 2
_lowerCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = TFViTMAEForPreTraining(_lowerCAmelCase )
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_lowerCAmelCase , training=_lowerCAmelCase )
_lowerCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs
_lowerCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( snake_case_ ,snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
_lowerCAmelCase = TFViTMAEModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def __lowerCAmelCase ( self ):
pass
def __lowerCAmelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , tf.keras.layers.Layer ) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def __lowerCAmelCase ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , noise=_lowerCAmelCase )
_lowerCAmelCase = copy.deepcopy(self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCAmelCase = model(**_lowerCAmelCase , noise=_lowerCAmelCase )
_lowerCAmelCase = outputs_dict[0].numpy()
_lowerCAmelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __lowerCAmelCase ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_lowerCAmelCase ):
_lowerCAmelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_lowerCAmelCase ):
_lowerCAmelCase = v.numpy()
else:
_lowerCAmelCase = np.array(_lowerCAmelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = prepare_numpy_arrays(_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , noise=_lowerCAmelCase )
_lowerCAmelCase = model(**_lowerCAmelCase , noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# make masks reproducible
np.random.seed(2 )
_lowerCAmelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCAmelCase = tf.constant(_lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCAmelCase = tf_noise
super().check_pt_tf_models(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_lowerCAmelCase )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(_lowerCAmelCase , _lowerCAmelCase ),)
if isinstance(_lowerCAmelCase , _lowerCAmelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_lowerCAmelCase , '''_keras_serializable''' , _lowerCAmelCase )
}
_lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCAmelCase = tf.convert_to_tensor(_lowerCAmelCase )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCAmelCase = main_layer_class(_lowerCAmelCase )
_lowerCAmelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCAmelCase = tf.keras.Model(_lowerCAmelCase , outputs=main_layer(_lowerCAmelCase ) )
_lowerCAmelCase = model(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase = os.path.join(_lowerCAmelCase , '''keras_model.h5''' )
model.save(_lowerCAmelCase )
_lowerCAmelCase = tf.keras.models.load_model(
_lowerCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_lowerCAmelCase , tf.keras.Model )
_lowerCAmelCase = model(_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase , _lowerCAmelCase )
@slow
def __lowerCAmelCase ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
_lowerCAmelCase = outputs.last_hidden_state.numpy()
_lowerCAmelCase = 0
else:
_lowerCAmelCase = outputs.logits.numpy()
_lowerCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowerCAmelCase , saved_model=_lowerCAmelCase )
_lowerCAmelCase = model_class.from_pretrained(_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , noise=_lowerCAmelCase )
if model_class.__name__ == "TFViTMAEModel":
_lowerCAmelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCAmelCase = 0
else:
_lowerCAmelCase = after_outputs['''logits'''].numpy()
_lowerCAmelCase = 0
_lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_lowerCAmelCase , 1E-5 )
def __lowerCAmelCase ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , noise=_lowerCAmelCase )
_lowerCAmelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_lowerCAmelCase )
_lowerCAmelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCAmelCase = model_class.from_config(model.config )
_lowerCAmelCase = new_model(_lowerCAmelCase ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCAmelCase = new_model(_lowerCAmelCase , noise=_lowerCAmelCase )
self.assert_outputs_same(_lowerCAmelCase , _lowerCAmelCase )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def __lowerCAmelCase ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def __lowerCAmelCase ( self ):
pass
@slow
def __lowerCAmelCase ( self ):
_lowerCAmelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_img( )->Optional[Any]:
_lowerCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCAmelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCAmelCase = ViTMAEConfig()
_lowerCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCAmelCase = model(**_lowerCAmelCase , noise=_lowerCAmelCase )
# verify the logits
_lowerCAmelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowerCAmelCase = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _lowerCAmelCase , atol=1E-4 )
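# --- Added arithmetic check (illustration): the tester's expected sequence length
# above, for its default settings image_size=30, patch_size=2, mask_ratio=0.6.
def _check_expected_seq_length():
    num_patches = (30 // 2) ** 2                                    # 225 patches
    seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1) ) )    # ceil(90.4) = 91
    assert (num_patches, seq_length) == (225, 91)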
| 664 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input( )->Any:
_lowerCAmelCase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase = get_sagemaker_input()
else:
_lowerCAmelCase = get_cluster_input()
return config
def config_command_parser( _SCREAMING_SNAKE_CASE : int=None )->str:
if subparsers is not None:
_lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=config_command )
return parser
def config_command( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = get_user_input()
if args.config_file is not None:
_lowerCAmelCase = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def main( )->List[Any]:
_lowerCAmelCase = config_command_parser()
_lowerCAmelCase = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
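# --- Added usage sketch (hedged): equivalent invocations of the command defined
# above. The --config_file flag is the one declared in config_command_parser; the
# file name is an example:
#
#   accelerate config --config_file my_config.yaml
#
# or, programmatically:
#
#   args = config_command_parser().parse_args(["--config_file", "my_config.yaml"])
#   config_command(args)   # prompts interactively, then writes my_config.yaml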
| 664 | 1 |
UpperCAmelCase_ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 664 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 1_0
NUM_PERM = 2_5_6
def get_min_hash( tokens : List[str] )->Optional[MinHash]:
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( _SCREAMING_SNAKE_CASE : str )->Set[str]:
return {t for t in NON_ALPHA.split(_SCREAMING_SNAKE_CASE ) if len(t.strip() ) > 0}
class UpperCAmelCase :
    def __init__( self , * , _lowerCAmelCase = 0.85 , ):
_lowerCAmelCase = duplication_jaccard_threshold
_lowerCAmelCase = NUM_PERM
_lowerCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase = defaultdict(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self._index.query(_lowerCAmelCase )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(_lowerCAmelCase , _lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(_lowerCAmelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase = [base] + list(_lowerCAmelCase )
# reformat the cluster to be a list of dict
_lowerCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(_lowerCAmelCase )
return duplicate_clusters
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = self.get_duplicate_clusters()
with open(_lowerCAmelCase , '''w''' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase = element
_lowerCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] )->Any:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_SCREAMING_SNAKE_CASE , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float )->str:
_lowerCAmelCase = DuplicationIndex(duplication_jaccard_threshold=_SCREAMING_SNAKE_CASE )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_SCREAMING_SNAKE_CASE ) ) , max_queue_size=1_0_0 ) ):
di.add(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->float:
_lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = get_tokens(_SCREAMING_SNAKE_CASE )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
UpperCAmelCase_ = None
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any )->List[Any]:
_lowerCAmelCase = []
for elementa in cluster:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
_lowerCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase = 1
extremes.append(_SCREAMING_SNAKE_CASE )
return extremes
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str )->Tuple:
global _shared_dataset
_lowerCAmelCase = dataset
_lowerCAmelCase = []
_lowerCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=_SCREAMING_SNAKE_CASE )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) , total=len(_SCREAMING_SNAKE_CASE ) , ):
extremes_list.append(_SCREAMING_SNAKE_CASE )
return extremes_list
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Type[Dataset] , _SCREAMING_SNAKE_CASE : float = 0.85 )->Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase = make_duplicate_clusters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase = {}
_lowerCAmelCase = find_extremes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase = element
_lowerCAmelCase = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase = dataset.filter(lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : idx not in remove_indices , with_indices=_SCREAMING_SNAKE_CASE )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase = extreme_dict[element['''base_index''']]['''copies''']
print(f'''Original dataset size: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Number of duplicate clusters: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Unique files in duplicate cluster: {len(_SCREAMING_SNAKE_CASE )}''' )
print(f'''Filtered dataset size: {len(_SCREAMING_SNAKE_CASE )}''' )
return ds_filter, duplicate_clusters
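# A small end-to-end sketch of the datasketch primitives used above: build one
# MinHash per document, index the hashes in an LSH structure, and query it for
# near-duplicates. The documents and the 0.5 threshold are illustrative.
from datasketch import MinHash, MinHashLSH

def make_min_hash(text, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):
        m.update(token.encode())
    return m

docs = {
    "a": "the quick brown fox jumps over the lazy dog",
    "b": "the quick brown fox leaps over the lazy dog",
    "c": "an entirely different sentence about nothing",
}
hashes = {key: make_min_hash(text) for key, text in docs.items()}
lsh = MinHashLSH(threshold=0.5, num_perm=256)
for key, mh in hashes.items():
    lsh.insert(key, mh)
# query() returns keys whose estimated Jaccard similarity clears the threshold,
# so "a" should retrieve itself and, very likely, its near-duplicate "b".
print(lsh.query(hashes["a"]))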
| 664 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCAmelCase_ = "\\n Text data.\n Second line of data."
UpperCAmelCase_ = "file"
@pytest.fixture(scope='''session''' )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] )->str:
_lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_lowerCAmelCase = bytes(_SCREAMING_SNAKE_CASE , '''utf-8''' )
with zstd.open(_SCREAMING_SNAKE_CASE , '''wb''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->int:
with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE ) , '''w''' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] )->Optional[Any]:
_lowerCAmelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_lowerCAmelCase = input_paths[compression_format]
_lowerCAmelCase = tmp_path / '''cache'''
_lowerCAmelCase = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ) as f:
_lowerCAmelCase = f.read()
with open(_SCREAMING_SNAKE_CASE ) as f:
_lowerCAmelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Tuple )->int:
_lowerCAmelCase = '''custom_cache'''
_lowerCAmelCase = '''custom_extracted_dir'''
_lowerCAmelCase = tmp_path / '''custom_extracted_path'''
if default_extracted:
_lowerCAmelCase = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_lowerCAmelCase = xz_file
_lowerCAmelCase = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE )
)
_lowerCAmelCase = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] )->Union[str, Any]:
# absolute path
_lowerCAmelCase = str(Path(_SCREAMING_SNAKE_CASE ).resolve() )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
# relative path
_lowerCAmelCase = str(Path(_SCREAMING_SNAKE_CASE ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any )->int:
# absolute path
_lowerCAmelCase = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
# relative path
_lowerCAmelCase = '''./__missing_file__.txt'''
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any )->Tuple:
_lowerCAmelCase = get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(_SCREAMING_SNAKE_CASE ) as f:
_lowerCAmelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->Optional[int]:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->Optional[int]:
_lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_get('''https://huggingface.co''' , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any )->List[Any]:
_lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_get('''ftp://huggingface.co''' , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] )->Any:
_lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_get('''s3://huggingface.co''' , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_head('''s3://huggingface.co''' )
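# The offline-mode tests above toggle a module-level flag with
# `unittest.mock.patch`. The same pattern on a throwaway namespace object,
# so the sketch does not depend on the real `datasets.config`:
import types
from unittest.mock import patch

config = types.SimpleNamespace(OFFLINE=False)

def fetch(url):
    if config.OFFLINE:
        raise ConnectionError("offline mode is enabled")
    return f"fetched {url}"

with patch.object(config, "OFFLINE", True):
    try:
        fetch("https://example.com")
    except ConnectionError as err:
        print(err)  # offline mode is enabled
print(fetch("https://example.com"))  # works again once the patch is undone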
| 664 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = process
_lowerCAmelCase = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
_lowerCAmelCase = self.dataset[i]
_lowerCAmelCase = self.process(_lowerCAmelCase , **self.params )
return processed
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowerCAmelCase = loader
_lowerCAmelCase = infer
_lowerCAmelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_lowerCAmelCase = None
_lowerCAmelCase = loader_batch_size
# Internal bookkeeping
_lowerCAmelCase = None
_lowerCAmelCase = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
def __lowerCAmelCase ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_lowerCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_lowerCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Convert ModelOutput to tuple first
_lowerCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_lowerCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
_lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take the correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
_lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_lowerCAmelCase = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
_lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase )
self._loader_batch_index += 1
return result
def __lowerCAmelCase ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_lowerCAmelCase = next(self.iterator )
_lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_lowerCAmelCase = processed
_lowerCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
_lowerCAmelCase = None
return self
def __lowerCAmelCase ( self ):
if self.subiterator is None:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_lowerCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
_lowerCAmelCase = next(self.subiterator )
return processed
class UpperCAmelCase ( snake_case_ ):
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
def __lowerCAmelCase ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`.
        # Because everything is flattened by `PipelineChunkIterator`, we need
        # to keep track of how to regroup here into the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes the accumulated items on to the caller.
_lowerCAmelCase = False
_lowerCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
_lowerCAmelCase = processed
_lowerCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
else:
_lowerCAmelCase = processed
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
return accumulator
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
return self.dataset[i][self.key]
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = keya
_lowerCAmelCase = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 664 | 1 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , ):
_lowerCAmelCase = size if size is not None else {'''height''': 18, '''width''': 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
def __lowerCAmelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = DPTImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DPTImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
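# A hedged usage sketch for the processor exercised above: feed one random PIL
# image and inspect the resulting pixel_values shape. The 18x18 target size
# mirrors the test default and is otherwise arbitrary.
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

image = Image.fromarray(np.random.randint(0, 256, (32, 48, 3), dtype=np.uint8))
processor = DPTImageProcessor(size={"height": 18, "width": 18})
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])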
| 664 |
import numpy
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] gives the number of nodes in the input layer.
        # The first hidden layer consists of 4 nodes.
_lowerCAmelCase = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_lowerCAmelCase = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_lowerCAmelCase = numpy.random.rand(3 , 1 )
# Real output values provided.
_lowerCAmelCase = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_lowerCAmelCase = numpy.zeros(output_array.shape )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __lowerCAmelCase ( self ):
_lowerCAmelCase = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_lowerCAmelCase = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_lowerCAmelCase = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
for iteration in range(1 , iterations + 1 ):
_lowerCAmelCase = self.feedforward()
self.back_propagation()
if give_loss:
_lowerCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = input_arr
_lowerCAmelCase = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_lowerCAmelCase = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : numpy.ndarray )->numpy.ndarray:
return (value) * (1 - (value))
def UpperCAmelCase__ ( )->int:
_lowerCAmelCase = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_lowerCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_lowerCAmelCase = TwoHiddenLayerNeuralNetwork(
input_array=_SCREAMING_SNAKE_CASE , output_array=_SCREAMING_SNAKE_CASE )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_SCREAMING_SNAKE_CASE , iterations=1_0 , give_loss=_SCREAMING_SNAKE_CASE )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
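# A quick numerical sanity check for the derivative convention used above:
# `sigmoid_derivative` takes the *activation* v = sigmoid(x) and returns
# v * (1 - v), which should match a central finite difference of sigmoid.
import numpy

x = numpy.linspace(-3, 3, 7)
v = 1 / (1 + numpy.exp(-x))
analytic = v * (1 - v)
eps = 1e-5
numeric = ((1 / (1 + numpy.exp(-(x + eps)))) - (1 / (1 + numpy.exp(-(x - eps))))) / (2 * eps)
assert numpy.allclose(analytic, numeric, atol=1e-6)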
| 664 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
UpperCAmelCase_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , ):
_lowerCAmelCase = [file for file in os.listdir(_lowerCAmelCase ) if os.path.isfile(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )]
if identifier is not None:
_lowerCAmelCase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
for n_ in n_identifier:
_lowerCAmelCase = [file for file in files if n_ not in file]
else:
_lowerCAmelCase = [file for file in files if n_identifier not in file]
_lowerCAmelCase = ignore_files or []
ignore_files.append('''__init__.py''' )
_lowerCAmelCase = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , _lowerCAmelCase )
if only_modules:
_lowerCAmelCase = file.split('''.''' )[0]
try:
_lowerCAmelCase = getattr(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = doctest.DocTestSuite(_lowerCAmelCase )
_lowerCAmelCase = unittest.TextTestRunner().run(_lowerCAmelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
_lowerCAmelCase = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = Path('''src/transformers''' )
_lowerCAmelCase = '''modeling'''
_lowerCAmelCase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase , ignore_files=_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = Path('''src/transformers''' )
_lowerCAmelCase = '''tokenization'''
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = Path('''src/transformers''' )
_lowerCAmelCase = '''configuration'''
self.analyze_directory(_lowerCAmelCase , identifier=_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = Path('''src/transformers''' )
_lowerCAmelCase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(_lowerCAmelCase , n_identifier=_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = Path('''docs/source''' )
_lowerCAmelCase = ['''favicon.ico''']
self.analyze_directory(_lowerCAmelCase , ignore_files=_lowerCAmelCase , only_modules=_lowerCAmelCase )
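# The suite above drives doctest programmatically. The same machinery on a
# tiny stand-alone function; `doctest.testmod` returns a (failed, attempted)
# pair, so zero failures is the success condition checked above as well.
import doctest

def square(x):
    """
    >>> square(3)
    9
    """
    return x * x

failed, attempted = doctest.testmod(verbose=False)
assert failed == 0 and attempted >= 1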
| 664 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
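# The `_LazyModule` indirection above defers heavy imports until an attribute
# is first touched. A minimal stand-alone sketch of the same idea, using the
# module-level `__getattr__` hook from PEP 562 and a made-up import table:
import importlib

_demo_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, exported in _demo_import_structure.items():
        if name in exported:
            # The submodule is only imported on first attribute access.
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")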
| 664 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
UpperCAmelCase_ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any )->Optional[Any]:
for attribute in key.split('''.''' ):
_lowerCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
_lowerCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
_lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_lowerCAmelCase = value
elif weight_type == "weight_g":
_lowerCAmelCase = value
elif weight_type == "weight_v":
_lowerCAmelCase = value
elif weight_type == "bias":
_lowerCAmelCase = value
else:
_lowerCAmelCase = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any )->List[str]:
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
_lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
_lowerCAmelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
_lowerCAmelCase = True
if "*" in mapped_key:
_lowerCAmelCase = name.split(_SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
_lowerCAmelCase = mapped_key.replace('''*''' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
_lowerCAmelCase = '''weight_g'''
elif "weight_v" in name:
_lowerCAmelCase = '''weight_v'''
elif "bias" in name:
_lowerCAmelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase = '''weight'''
else:
_lowerCAmelCase = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : int )->str:
_lowerCAmelCase = full_name.split('''conv_layers.''' )[-1]
_lowerCAmelCase = name.split('''.''' )
_lowerCAmelCase = int(items[0] )
_lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
_lowerCAmelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
_lowerCAmelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
_lowerCAmelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
_lowerCAmelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : List[Any]=True )->Any:
if config_path is not None:
_lowerCAmelCase = UniSpeechSatConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = UniSpeechSatConfig()
_lowerCAmelCase = ''''''
if is_finetuned:
_lowerCAmelCase = UniSpeechSatForCTC(_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = UniSpeechSatForPreTraining(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
_lowerCAmelCase = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
UpperCAmelCase_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
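# The conversion above repeatedly walks dotted state-dict keys into nested
# attributes before copying weights. The essential pattern on a toy module:
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4))
pointer = model
for attribute in "0.weight".split("."):
    pointer = getattr(pointer, attribute)
print(pointer.shape)  # torch.Size([4, 4])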
| 664 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def UpperCAmelCase__ ( *_SCREAMING_SNAKE_CASE : Tuple )->List[Any]:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
_lowerCAmelCase = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Exception )->bool:
_lowerCAmelCase = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : callable = None , _SCREAMING_SNAKE_CASE : int = 1_2_8 )->Optional[int]:
if function is None:
return functools.partial(_SCREAMING_SNAKE_CASE , starting_batch_size=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = starting_batch_size
def decorator(*_SCREAMING_SNAKE_CASE : Optional[int] , **_SCREAMING_SNAKE_CASE : Optional[Any] ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_lowerCAmelCase = list(inspect.signature(_SCREAMING_SNAKE_CASE ).parameters.keys() )
# Guard against user error
if len(_SCREAMING_SNAKE_CASE ) < (len(_SCREAMING_SNAKE_CASE ) + 1):
_lowerCAmelCase = ''', '''.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
except Exception as e:
if should_reduce_batch_size(_SCREAMING_SNAKE_CASE ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
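# A usage sketch for the decorator above, importing it under the public name
# the accelerate API exposes for this helper. The wrapped function takes the
# batch size as its first argument, and the decorator halves it on every
# out-of-memory-style failure until a call succeeds.
from accelerate.utils import find_executable_batch_size

def fake_train(batch_size, data="dummy"):
    if batch_size > 32:
        # A one-argument RuntimeError with this message trips the OOM check above.
        raise RuntimeError("CUDA out of memory.")
    return f"trained with batch_size={batch_size} on {data}"

wrapped = find_executable_batch_size(fake_train, starting_batch_size=128)
print(wrapped())  # trained with batch_size=32 on dummy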
| 664 | 1 |
from jiwer import compute_measures
import datasets
UpperCAmelCase_ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
UpperCAmelCase_ = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
UpperCAmelCase_ = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __lowerCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def __lowerCAmelCase ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=False ):
if concatenate_texts:
return compute_measures(_lowerCAmelCase , _lowerCAmelCase )["wer"]
else:
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for prediction, reference in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = compute_measures(_lowerCAmelCase , _lowerCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
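# A worked check of the WER formula documented above, using the two pairs from
# the usage example:
#   ref "this is the reference"  vs pred "this is the prediction"
#     -> 1 substitution over N=4 reference words
#   ref "there is another one"   vs pred "there is an other sample"
#     -> 2 substitutions + 1 insertion over N=4 reference words
# Pooled: (S + D + I) / N = (1 + 3) / (4 + 4) = 0.5, matching the docstring.
from jiwer import wer

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
print(wer(references, predictions))  # 0.5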
| 664 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=8 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=16 , _lowerCAmelCase=5 , _lowerCAmelCase=2 , _lowerCAmelCase=36 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = scope
def __lowerCAmelCase ( self ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.get_config()
_lowerCAmelCase = 300
return config
def __lowerCAmelCase ( self ):
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = self.prepare_config_and_inputs()
_lowerCAmelCase = True
_lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowerCAmelCase = True
_lowerCAmelCase = MraModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , )
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = MraForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MraForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = MraForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = MraForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = config_and_inputs
_lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 664 | 1 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
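
# Worked check (illustrative, not part of the original file): with limit = 8
# the only lamina that fits is the 3x3 square with a 1x1 hole, which uses
# exactly eight tiles, so solution(8) returns 1.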
if __name__ == "__main__":
print(F"""{solution() = }""")
| 664 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 664 | 1 |
def optimal_merge_pattern(files: list) -> float:
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
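
# Usage sketch (illustrative values, not from the original file): for
# files = [2, 3, 4] the two cheapest files merge first at cost 2 + 3 = 5,
# then 5 + 4 = 9, so optimal_merge_pattern([2, 3, 4]) returns 14.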
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ):
_lowerCAmelCase = size if size is not None else {'''shortest_edge''': 18}
_lowerCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = num_frames
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
_lowerCAmelCase = crop_size
def __lowerCAmelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = VivitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ):
_lowerCAmelCase = VivitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''size''' ) )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self ):
# Initialize image_processing
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 664 | 1 |
UpperCAmelCase_ = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
UpperCAmelCase_ = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 1_2,
"Pm": 1_5,
"Em": 1_8,
"Zm": 2_1,
"Ym": 2_4,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
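
# Usage sketch (illustrative): converting 4 kilometers to meters applies a
# factor of 10 ** (3 - 0), so length_conversion(4, "kilometer", "meter")
# yields 4000 metres.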
if __name__ == "__main__":
from doctest import testmod
testmod()
| 664 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase_ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase_ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase_ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
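
# Worked example for compute_em above, reusing the docstring case: the
# prediction "About 95 you now get in ." never normalizes to the reference
# "About 95 species are currently known .", so the exact score is 0.0.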
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
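
# For the docstring example at the top of this file, averaging the keep,
# delete and add components over 1- to 4-grams this way yields the reported
# sari value of about 21.81.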
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
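
# Shape note for compute_sacrebleu above: references arrive as one list of
# reference sentences per prediction, while sacrebleu.corpus_bleu expects one
# stream per reference index, hence the transposition into
# transformed_references before scoring.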
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 664 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 664 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 1 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger("transformers.models.speecht5")
UpperCAmelCase_ = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
UpperCAmelCase_ = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
UpperCAmelCase_ = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
UpperCAmelCase_ = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
UpperCAmelCase_ = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
UpperCAmelCase_ = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
UpperCAmelCase_ = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
UpperCAmelCase_ = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
UpperCAmelCase_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
UpperCAmelCase_ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCAmelCase_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCAmelCase_ = []
UpperCAmelCase_ = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
UpperCAmelCase_ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
UpperCAmelCase_ = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
UpperCAmelCase_ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
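
# Example of the matching above (illustrative): with ignore_keys containing
# "text_decoder_prenet.*", any weight whose name starts with
# "text_decoder_prenet." is skipped during conversion.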
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
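
# Example invocation (illustrative paths and script name, not from the
# original file):
#   python convert_speecht5_checkpoint.py --task s2t \
#       --checkpoint_path ./speecht5_asr.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_asr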
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCAmelCase_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 664 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
UpperCAmelCase_ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
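
# For the adjacency list above, removing vertex 2 splits off {0, 1}, removing
# vertex 3 isolates vertex 4, and removing vertex 5 cuts off the 6-7-8 cycle,
# so compute_ap prints the articulation points 2, 3 and 5.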
| 664 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 664 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase ( snake_case_ ):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase = '''pt'''
_lowerCAmelCase = '''tf'''
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase )
model_tf.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 664 | 1 |
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
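
# Usage sketch (illustrative): for items = [2, 4, 5, 7, 899, 54, 32] the
# sorted order is [2, 4, 5, 7, 32, 54, 899], so quick_select(items, 3)
# returns the median 7 in expected linear time.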
| 664 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = DiTPipeline
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1_000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('''mps''' ):
_lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowerCAmelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''cpu'''
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
_lowerCAmelCase = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 664 | 1 |
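# A compact inference sketch mirroring the slow tests above; the model id,
# scheduler swap, and label names all come from the tests themselves. A CUDA
# device is assumed.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# DiT is class-conditional on ImageNet: text labels map to integer class ids.
class_ids = pipe.get_label_ids(["white shark", "umbrella"])
images = pipe(class_labels=class_ids, num_inference_steps=25).images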
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCAmelCase_ = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **_lowerCAmelCase ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCAmelCase = deprecated_arg[3:]
_lowerCAmelCase = not kwargs.pop(_lowerCAmelCase )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
_lowerCAmelCase = kwargs.pop('''tpu_name''' , self.tpu_name )
_lowerCAmelCase = kwargs.pop('''device_idx''' , self.device_idx )
_lowerCAmelCase = kwargs.pop('''eager_mode''' , self.eager_mode )
_lowerCAmelCase = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = field(
default=snake_case_ ,metadata={'''help''': '''Name of TPU'''} ,)
SCREAMING_SNAKE_CASE__ = field(
default=0 ,metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} ,)
SCREAMING_SNAKE_CASE__ = field(default=snake_case_ ,metadata={'''help''': '''Benchmark models in eager model.'''} )
SCREAMING_SNAKE_CASE__ = field(
default=snake_case_ ,metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} ,)
@cached_property
def __lowerCAmelCase ( self ):
requires_backends(self , ['''tf'''] )
_lowerCAmelCase = None
if self.tpu:
try:
if self.tpu_name:
_lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCAmelCase = None
return tpu
@cached_property
def __lowerCAmelCase ( self ):
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
_lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
_lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def __lowerCAmelCase ( self ):
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def __lowerCAmelCase ( self ):
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def __lowerCAmelCase ( self ):
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def __lowerCAmelCase ( self ):
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __lowerCAmelCase ( self ):
return self.n_gpu > 0
| 664 |
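# A minimal sketch of how these arguments drive a benchmark run, assuming the
# public TensorFlowBenchmark wrapper from the same package (TensorFlow must be
# installed for the strategy setup above to succeed).
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    batch_sizes=[1],
    sequence_lengths=[8],
    eager_mode=True,     # skip XLA / graph compilation
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()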
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {"UserAgent": UserAgent().random}
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->dict:
_lowerCAmelCase = script.contents[0]
_lowerCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase ):
_lowerCAmelCase = F'''https://www.instagram.com/{username}/'''
_lowerCAmelCase = self.get_json()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = requests.get(self.url , headers=_lowerCAmelCase ).text
_lowerCAmelCase = BeautifulSoup(_lowerCAmelCase , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __lowerCAmelCase ( self ):
return self.user_data["username"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["full_name"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["biography"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["business_email"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["external_url"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_follow"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["is_verified"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["is_private"]
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
_lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase ( snake_case_ ):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase = '''pt'''
_lowerCAmelCase = '''tf'''
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase )
model_tf.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
| 664 |
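# The resolution order pinned down by the tests above, summarized:
#   1. an explicit `framework` argument always wins;
#   2. a local checkpoint is probed for PyTorch vs TensorFlow weights;
#   3. a hub model id falls back to the installed backend, preferring PyTorch
#      when both are present, and raising when neither is.
from transformers.onnx import FeaturesManager

framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
print(framework)  # "pt" in a torch environment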
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : str )->list[int]:
_lowerCAmelCase = int(_SCREAMING_SNAKE_CASE )
# Initialize Result
_lowerCAmelCase = []
# Traverse through all denomination
for denomination in reversed(_SCREAMING_SNAKE_CASE ):
# Find denominations
while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ):
total_value -= int(_SCREAMING_SNAKE_CASE )
answer.append(_SCREAMING_SNAKE_CASE ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase_ = []
UpperCAmelCase_ = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
UpperCAmelCase_ = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
UpperCAmelCase_ = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
UpperCAmelCase_ = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(F"""Following is minimal change for {value}: """)
UpperCAmelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
| 664 | 1 |
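# A compact sketch of the greedy change-making routine above, assuming the
# denominations arrive in ascending order (the loop walks them largest-first).
# Note the greedy strategy is only guaranteed optimal for canonical coin
# systems such as the Indian-currency defaults used above.
def find_minimum_change(denominations: list[int], value: int) -> list[int]:
    total = int(value)
    answer = []
    for denomination in reversed(denominations):
        while total >= denomination:
            total -= denomination
            answer.append(denomination)
    return answer


assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]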
from __future__ import annotations
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list[int] )->int:
if not nums:
return 0
_lowerCAmelCase = nums[0]
_lowerCAmelCase = 0
for num in nums[1:]:
_lowerCAmelCase , _lowerCAmelCase = (
max_excluding + num,
max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
)
return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664 |
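# The recurrence implemented above, with descriptive names (assumed mapping):
# track the best sum that includes the current number and the best that skips it.
def max_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,                # take num: the previous one must be skipped
            max(max_including, max_excluding),  # skip num: carry the better history
        )
    return max(max_including, max_excluding)


assert max_non_adjacent_sum([3, 2, 7, 10]) == 13  # 3 + 10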
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] )->Dict:
# Initialise PyTorch model
_lowerCAmelCase = AlbertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
_lowerCAmelCase = AlbertForPreTraining(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_albert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 664 | 1 |
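# An illustrative invocation of the converter above; the script filename and
# every path are placeholders, while the flag names come from the argparse
# definitions in the snippet itself:
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path  ./albert_base/model.ckpt-best \
#       --albert_config_file  ./albert_base/albert_config.json \
#       --pytorch_dump_path   ./albert_base/pytorch_model.bin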
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCAmelCase_ = threading.Lock()
UpperCAmelCase_ = None
UpperCAmelCase_ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
UpperCAmelCase_ = logging.WARNING
UpperCAmelCase_ = True
def UpperCAmelCase__ ( )->str:
_lowerCAmelCase = os.getenv('''TRANSFORMERS_VERBOSITY''' , _SCREAMING_SNAKE_CASE )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def UpperCAmelCase__ ( )->str:
return __name__.split('''.''' )[0]
def UpperCAmelCase__ ( )->logging.Logger:
return logging.getLogger(_get_library_name() )
def UpperCAmelCase__ ( )->None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_lowerCAmelCase = logging.StreamHandler() # Set sys.stderr as stream.
_lowerCAmelCase = sys.stderr.flush
# Apply our default configuration to the library root logger.
_lowerCAmelCase = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_lowerCAmelCase = False
def UpperCAmelCase__ ( )->None:
global _default_handler
with _lock:
if not _default_handler:
return
_lowerCAmelCase = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_lowerCAmelCase = None
def UpperCAmelCase__ ( )->Optional[Any]:
return log_levels
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[str] = None )->logging.Logger:
if name is None:
_lowerCAmelCase = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->int:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int )->None:
_configure_library_root_logger()
_get_library_root_logger().setLevel(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->Optional[int]:
return set_verbosity(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->Union[str, Any]:
return set_verbosity(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->Dict:
return set_verbosity(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->Optional[Any]:
return set_verbosity(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def UpperCAmelCase__ ( )->None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : logging.Handler )->None:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : logging.Handler )->None:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->None:
_configure_library_root_logger()
_lowerCAmelCase = False
def UpperCAmelCase__ ( )->None:
_configure_library_root_logger()
_lowerCAmelCase = True
def UpperCAmelCase__ ( )->None:
_lowerCAmelCase = _get_library_root_logger().handlers
for handler in handlers:
_lowerCAmelCase = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( )->None:
_lowerCAmelCase = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any , *_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : Union[str, Any] )->Optional[Any]:
_lowerCAmelCase = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , _SCREAMING_SNAKE_CASE )
if no_advisory_warnings:
return
self.warning(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = warning_advice
@functools.lru_cache(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , *_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : Optional[int] )->Dict:
self.warning(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = warning_once
class UpperCAmelCase :
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ): # pylint: disable=unused-argument
_lowerCAmelCase = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , _lowerCAmelCase ):
def empty_fn(*_lowerCAmelCase , **_lowerCAmelCase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return
class UpperCAmelCase :
def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
if _tqdm_active:
return tqdm_lib.tqdm(*_lowerCAmelCase , **_lowerCAmelCase )
else:
return EmptyTqdm(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
_lowerCAmelCase = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase_ = _tqdm_cls()
def UpperCAmelCase__ ( )->bool:
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase__ ( )->List[Any]:
global _tqdm_active
_lowerCAmelCase = True
hf_hub_utils.enable_progress_bars()
def UpperCAmelCase__ ( )->Optional[int]:
global _tqdm_active
_lowerCAmelCase = False
hf_hub_utils.disable_progress_bars()
| 664 |
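# A short usage sketch of the module above, assuming it is exposed as
# `transformers.utils.logging` (its upstream location):
from transformers.utils import logging

logging.set_verbosity_info()           # the library root logger now emits INFO and above
logger = logging.get_logger(__name__)  # per-module child of the root logger
logger.info("loading checkpoint shards")
logger.warning_once("emitted once per process via the lru_cache wrapper above")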
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = "Hello world! cécé herlolip"
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool )->List[Any]:
_lowerCAmelCase = FairseqRobertaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
roberta.eval() # disable dropout
_lowerCAmelCase = roberta.model.encoder.sentence_encoder
_lowerCAmelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase = XLMRobertaXLForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(_SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase = model.roberta.encoder.layer[i]
_lowerCAmelCase = roberta_sent_encoder.layers[i]
_lowerCAmelCase = layer.attention
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase = roberta_layer.final_layer_norm.weight
_lowerCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# output
_lowerCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_lowerCAmelCase = roberta_layer.fca.weight
_lowerCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase = roberta.model.encoder.lm_head.weight
_lowerCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase = roberta.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )[0]
if classification_head:
_lowerCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(_SCREAMING_SNAKE_CASE ) )
else:
_lowerCAmelCase = roberta.model(_SCREAMING_SNAKE_CASE )[0]
print(our_output.shape , their_output.shape )
_lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
_lowerCAmelCase = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 664 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] )->List[str]:
_lowerCAmelCase = 1.5
_lowerCAmelCase = int(factor * num_class_images )
_lowerCAmelCase = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''' , exist_ok=_SCREAMING_SNAKE_CASE )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
_lowerCAmelCase = client.query(text=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1e4:
break
else:
_lowerCAmelCase = int(factor * num_images )
_lowerCAmelCase = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=_SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = tqdm(desc='''downloading real regularization images''' , total=_SCREAMING_SNAKE_CASE )
with open(f'''{class_data_dir}/caption.txt''' , '''w''' ) as fa, open(f'''{class_data_dir}/urls.txt''' , '''w''' ) as fa, open(
f'''{class_data_dir}/images.txt''' , '''w''' ) as fa:
while total < num_class_images:
_lowerCAmelCase = class_images[count]
count += 1
try:
_lowerCAmelCase = requests.get(images['''url'''] )
if img.status_code == 2_0_0:
_lowerCAmelCase = Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''' , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(f'''{class_data_dir}/images/{total}.jpg''' + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def UpperCAmelCase__ ( )->List[str]:
_lowerCAmelCase = argparse.ArgumentParser('''''' , add_help=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_0_0 , type=_SCREAMING_SNAKE_CASE )
return parser.parse_args()
if __name__ == "__main__":
UpperCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 664 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = None
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=0.999 , _SCREAMING_SNAKE_CASE : List[str]="cosine" , )->Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_SCREAMING_SNAKE_CASE : List[str] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
_lowerCAmelCase = []
for i in range(_SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = i / num_diffusion_timesteps
_lowerCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_SCREAMING_SNAKE_CASE ) / alpha_bar_fn(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa )
class UpperCAmelCase ( snake_case_ ,snake_case_ ):
SCREAMING_SNAKE_CASE__ = 1
@register_to_config
def __init__( self , _lowerCAmelCase = 1_000 , _lowerCAmelCase = 0.0_001 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = "linear" , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0 , _lowerCAmelCase = "epsilon" , _lowerCAmelCase = 1.0 , **_lowerCAmelCase , ):
if kwargs.get('''set_alpha_to_one''' , _lowerCAmelCase ) is not None:
_lowerCAmelCase = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
_lowerCAmelCase = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
_lowerCAmelCase = torch.tensor(_lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_lowerCAmelCase = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_lowerCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_lowerCAmelCase = betas_for_alpha_bar(_lowerCAmelCase )
else:
raise NotImplementedError(F'''{beta_schedule} is not implemented for {self.__class__}''' )
_lowerCAmelCase = 1.0 - self.betas
_lowerCAmelCase = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_lowerCAmelCase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_lowerCAmelCase = 1.0
# setable values
_lowerCAmelCase = None
_lowerCAmelCase = torch.from_numpy(np.arange(0 , _lowerCAmelCase ).copy().astype(np.intaa ) )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
return sample
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
F''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
F''' maximal {self.config.num_train_timesteps} timesteps.''' )
_lowerCAmelCase = num_inference_steps
_lowerCAmelCase = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_lowerCAmelCase = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa )
_lowerCAmelCase = torch.from_numpy(_lowerCAmelCase ).to(_lowerCAmelCase )
self.timesteps += self.config.steps_offset
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = True , ):
# 1. get previous step value (=t+1)
_lowerCAmelCase = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_lowerCAmelCase = self.alphas_cumprod[timestep]
_lowerCAmelCase = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_lowerCAmelCase = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_lowerCAmelCase = model_output
elif self.config.prediction_type == "sample":
_lowerCAmelCase = model_output
_lowerCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_lowerCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_lowerCAmelCase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_lowerCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase )
def __len__( self ):
return self.config.num_train_timesteps
| 664 | 1 |
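# A standalone sketch of the "squaredcos_cap_v2" (cosine) beta schedule used
# above: beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta,
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2.
import math


def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list[float]:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas


betas = cosine_betas(1000)
assert 0 < betas[0] < betas[-1] <= 0.999  # noise grows toward the end of the schedule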
from __future__ import annotations
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str )->bool:
_lowerCAmelCase = get_failure_array(_SCREAMING_SNAKE_CASE )
# 2) Step through text searching for pattern
_lowerCAmelCase , _lowerCAmelCase = 0, 0 # index into text, pattern
while i < len(_SCREAMING_SNAKE_CASE ):
if pattern[j] == text[i]:
if j == (len(_SCREAMING_SNAKE_CASE ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_lowerCAmelCase = failure[j - 1]
continue
i += 1
return False
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str )->list[int]:
_lowerCAmelCase = [0]
_lowerCAmelCase = 0
_lowerCAmelCase = 1
while j < len(_SCREAMING_SNAKE_CASE ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_lowerCAmelCase = failure[i - 1]
continue
j += 1
failure.append(_SCREAMING_SNAKE_CASE )
return failure
if __name__ == "__main__":
# Test 1)
UpperCAmelCase_ = "abc1abc12"
UpperCAmelCase_ = "alskfjaldsabc1abc1abc12k23adsfabcabc"
UpperCAmelCase_ = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCAmelCase_ = "ABABX"
UpperCAmelCase_ = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
UpperCAmelCase_ = "AAAB"
UpperCAmelCase_ = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
UpperCAmelCase_ = "abcdabcy"
UpperCAmelCase_ = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
UpperCAmelCase_ = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 664 |
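# A de-obfuscated sketch of the KMP routines above (assumed identifier mapping).
# The failure array stores, for each prefix of the pattern, the length of its
# longest proper prefix that is also a suffix, so the search never re-compares
# characters of the text.
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i, j = 0, 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


def kmp(pattern: str, text: str) -> bool:
    failure = get_failure_array(pattern)
    i = j = 0  # indices into text and pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                return True
            j += 1
        elif j > 0:
            j = failure[j - 1]  # fall back within the pattern, keep i fixed
            continue
        i += 1
    return False


assert get_failure_array("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]
assert kmp("ABABX", "ABABZABABYABABX")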
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["DeiTFeatureExtractor"]
UpperCAmelCase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 664 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = '''ClapFeatureExtractor'''
SCREAMING_SNAKE_CASE__ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
_lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if audios is not None:
_lowerCAmelCase = self.feature_extractor(
_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and audios is not None:
_lowerCAmelCase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 664 | 1 |
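# A usage sketch for the processor above; the checkpoint name is an assumption
# (a public CLAP model), and CLAP's 48 kHz sampling rate is taken as given.
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000)  # one second of placeholder audio at 48 kHz
inputs = processor(
    text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
# `inputs` now carries both `input_ids` (text) and `input_features` (audio)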
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self , _lowerCAmelCase ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
_lowerCAmelCase = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sgugger/tiny-distilbert-classification'''
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , only_pretrain_model=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , torchscript=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , fpaa=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
_lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
# set architectures equal to `None`
_lowerCAmelCase = None
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowerCAmelCase , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
_lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tinier_bart'''
_lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
_lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tinier_bart'''
_lowerCAmelCase = AutoConfig.from_pretrained(_lowerCAmelCase )
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase , configs=[config] )
_lowerCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , save_to_csv=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowerCAmelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowerCAmelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowerCAmelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowerCAmelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowerCAmelCase , '''env.csv''' ) , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''env.csv''' ) ).exists() )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowerCAmelCase ):
self.assertTrue(hasattr(_lowerCAmelCase , '''sequential''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''cumulative''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''current''' ) )
self.assertTrue(hasattr(_lowerCAmelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCAmelCase , inference=_lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowerCAmelCase , '''log.txt''' ) , log_print=_lowerCAmelCase , trace_memory_line_by_line=_lowerCAmelCase , multi_process=_lowerCAmelCase , )
_lowerCAmelCase = PyTorchBenchmark(_lowerCAmelCase )
_lowerCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowerCAmelCase , '''log.txt''' ) ).exists() )
| 664 |
from __future__ import annotations
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : list )->list:
if len(_SCREAMING_SNAKE_CASE ) == 0:
return []
_lowerCAmelCase , _lowerCAmelCase = min(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = int(max_value - min_value ) + 1
_lowerCAmelCase = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in my_list:
buckets[int(i - min_value )].append(_SCREAMING_SNAKE_CASE )
return [v for bucket in buckets for v in sorted(_SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 664 | 1 |
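# A runnable sketch of the bucket sort above (assumed identifier mapping):
# one bucket per integer offset from the minimum, each bucket sorted on output.
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]


assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]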
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    # latents produced by `VQModel.encode` (before quantization)
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
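
# Usage sketch (illustrative only; the tiny config below is an assumption, not
# the default of any released checkpoint):
#
#   model = VQModel(block_out_channels=(32,), num_vq_embeddings=16)
#   sample = torch.randn(1, 3, 32, 32)
#   reconstruction = model(sample).sample  # encode -> quantize -> decode round trip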
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase__ ( )->Any:
_lowerCAmelCase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase = get_sagemaker_input()
else:
_lowerCAmelCase = get_cluster_input()
return config
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str:
if subparsers is not None:
_lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = get_user_input()
if args.config_file is not None:
_lowerCAmelCase = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def UpperCAmelCase__ ( )->List[Any]:
_lowerCAmelCase = config_command_parser()
_lowerCAmelCase = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
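# Typical CLI usage (the `accelerate` entry point registers this parser as a
# subcommand; the path below is an example):
#   accelerate config
#   accelerate config --config_file /path/to/default_config.yaml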