| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
"""simple docstring"""
import math
class __UpperCamelCase :
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = 0.0
_lowerCAmelCase : Tuple = 0.0
for i in range(len(__a ) ):
da += math.pow((sample[i] - weights[0][i]) ,2 )
da += math.pow((sample[i] - weights[1][i]) ,2 )
return 0 if da > da else 1
return 0
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_lowerCAmelCase : int = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_lowerCAmelCase : Optional[int] = SelfOrganizingMap()
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Tuple = 0.5
for _ in range(UpperCamelCase__ ):
for j in range(len(UpperCamelCase__ ) ):
# training sample
_lowerCAmelCase : Any = training_samples[j]
# Compute the winning vector
_lowerCAmelCase : Optional[int] = self_organizing_map.get_winner(UpperCamelCase__ , UpperCamelCase__ )
# Update the winning vector
_lowerCAmelCase : List[Any] = self_organizing_map.update(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# classify test sample
_lowerCAmelCase : Dict = [0, 0, 0, 1]
_lowerCAmelCase : Dict = self_organizing_map.get_winner(UpperCamelCase__ , UpperCamelCase__ )
# results
print(f"""Clusters that the test sample belongs to : {winner}""" )
print(f"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
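

# --- Illustrative check (added): a minimal sketch exercising the cleaned-up
# `SelfOrganizingMap` above. It confirms that `get_winner` returns the index
# of the weight row closest to the sample in squared Euclidean distance.
def _som_winner_demo() -> int:
    som = SelfOrganizingMap()
    demo_weights = [[0.0, 0.0], [1.0, 1.0]]
    # (0.9, 1.1) is far from row 0 and close to row 1, so the winner is 1.
    return som.get_winner(demo_weights, [0.9, 1.1])


assert _som_winner_demo() == 1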
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_lowerCAmelCase : Union[str, Any] = key or (lambda _A : x)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
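

# --- Usage sketch (added): the heap above orders items by `key(value)` with
# the largest on top; pass e.g. `key=lambda x: -x` for min-heap behavior.
# The item ids (5, 6, 7) and values are arbitrary illustrative numbers.
def _heap_demo() -> None:
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    assert h.get_top() == [7, 37]  # largest value is on top
    h.update_item(5, 100)  # item 5 is repositioned after its value changes
    assert h.extract_top() == [5, 100]
    h.delete_item(7)
    assert h.get_top() == [6, 31]


_heap_demo()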
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 we had n_vocab=80; in v3 we missed "+", so n_vocab=79 characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = [self.artists_encoder.get(UpperCAmelCase__ ,0 ) for artist in list_artists]
for genres in range(len(UpperCAmelCase__ ) ):
_lowerCAmelCase : List[str] = [self.genres_encoder.get(UpperCAmelCase__ ,0 ) for genre in list_genres[genres]]
_lowerCAmelCase : Any = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
_lowerCAmelCase : int = [[self.lyrics_encoder.get(UpperCAmelCase__ ,0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return list(UpperCAmelCase__ )
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = self.prepare_for_tokenization(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
_lowerCAmelCase : str = self._tokenize(UpperCAmelCase__ )
return artist, genre, lyrics
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
_lowerCAmelCase : str = artists[idx].lower()
_lowerCAmelCase : Dict = [genres[idx].lower()]
else:
_lowerCAmelCase : List[Any] = self._normalize(artists[idx] ) + '.v2'
_lowerCAmelCase : List[Any] = [
self._normalize(UpperCAmelCase__ ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
_lowerCAmelCase : Optional[Any] = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
_lowerCAmelCase : Tuple = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
_lowerCAmelCase : Optional[Any] = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase__ ) )}
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Union[str, Any] = len(UpperCAmelCase__ ) + 1
_lowerCAmelCase : int = self.vocab
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
_lowerCAmelCase : Dict = ''
else:
_lowerCAmelCase : int = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
_lowerCAmelCase : List[str] = self._run_strip_accents(UpperCAmelCase__ )
_lowerCAmelCase : List[Any] = lyrics.replace('\\' ,'\n' )
_lowerCAmelCase : List[Any] = self.out_of_vocab.sub('' ,UpperCAmelCase__ ), [], []
return artists, genres, lyrics
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = unicodedata.normalize('NFD' ,UpperCAmelCase__ )
_lowerCAmelCase : Optional[Any] = []
for char in text:
_lowerCAmelCase : List[str] = unicodedata.category(UpperCAmelCase__ )
if cat == "Mn":
continue
output.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = (
[chr(UpperCAmelCase__ ) for i in range(ord('a' ) ,ord('z' ) + 1 )]
+ [chr(UpperCAmelCase__ ) for i in range(ord('A' ) ,ord('Z' ) + 1 )]
+ [chr(UpperCAmelCase__ ) for i in range(ord('0' ) ,ord('9' ) + 1 )]
+ ['.']
)
_lowerCAmelCase : List[str] = frozenset(UpperCAmelCase__ )
_lowerCAmelCase : List[str] = re.compile(r'_+' )
_lowerCAmelCase : int = ''.join([c if c in accepted else '_' for c in text.lower()] )
_lowerCAmelCase : int = pattern.sub('_' ,UpperCAmelCase__ ).strip('_' )
return text
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return " ".join(UpperCAmelCase__ )
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if not isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ):
_lowerCAmelCase : List[str] = TensorType(UpperCAmelCase__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
_lowerCAmelCase : Optional[Any] = tf.constant
_lowerCAmelCase : Tuple = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
_lowerCAmelCase : Optional[Any] = torch.tensor
_lowerCAmelCase : List[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
_lowerCAmelCase : int = jnp.array
_lowerCAmelCase : Dict = _is_jax
else:
_lowerCAmelCase : Optional[int] = np.asarray
_lowerCAmelCase : Union[str, Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
_lowerCAmelCase : Any = [inputs]
if not is_tensor(UpperCAmelCase__ ):
_lowerCAmelCase : Dict = as_tensor(UpperCAmelCase__ )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self ,_A ,_A ,_A="" ,_A="pt" ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [0, 0, 0]
_lowerCAmelCase : List[str] = [artist] * len(self.version )
_lowerCAmelCase : str = [genres] * len(self.version )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : str = self.tokenize(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[int] = self._convert_token_to_id(UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ )
_lowerCAmelCase : Dict = [-INFINITY] * len(full_tokens[-1] )
_lowerCAmelCase : Optional[int] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=UpperCAmelCase__ )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : Any = os.path.join(
UpperCAmelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(UpperCAmelCase__ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder ,ensure_ascii=UpperCAmelCase__ ) )
_lowerCAmelCase : int = os.path.join(
UpperCAmelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(UpperCAmelCase__ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder ,ensure_ascii=UpperCAmelCase__ ) )
_lowerCAmelCase : List[str] = os.path.join(
UpperCAmelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(UpperCAmelCase__ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=UpperCAmelCase__ ) )
return (artists_file, genres_file, lyrics_file)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.artists_decoder.get(UpperCAmelCase__ )
_lowerCAmelCase : Dict = [self.genres_decoder.get(UpperCAmelCase__ ) for genre in genres_index]
_lowerCAmelCase : List[Any] = [self.lyrics_decoder.get(UpperCAmelCase__ ) for character in lyric_index]
return artist, genres, lyrics
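

# --- Illustrative check (added): a standalone re-implementation of the
# `_normalize` helper above, showing what it does to an artist name: keep
# [a-z0-9.], map everything else to "_", collapse runs, and strip the ends.
def _normalize_demo(text: str) -> str:
    accepted = frozenset("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", text).strip("_")


assert _normalize_demo("Zac Brown Band!") == "zac_brown_band"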
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, embedding_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Returns all attention processors used in the model, indexed by their weight name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Sets the attention processor(s) to use to compute attention."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and sets the default implementation."""
        self.set_attn_processor(AttnProcessor())
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
    def post_process_latents(self, prior_latents):
        """Un-normalizes predicted latents back to the CLIP embedding scale."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
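

# --- Illustrative check (added): how the additive causal mask registered in
# `__init__` behaves. `triu_(1)` keeps -10000.0 strictly above the diagonal,
# so after adding the mask to attention logits, position i cannot attend to
# any position j > i, while the diagonal and everything below stay unmasked.
_mask_demo = torch.full([4, 4], -10000.0)
_mask_demo.triu_(1)
assert _mask_demo[0, 0] == 0.0 and _mask_demo[3, 0] == 0.0 and _mask_demo[0, 3] == -10000.0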
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Generate outputs for input_path, save them, and (optionally) score them against reference_path."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
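

# --- Illustrative sketch (added): the comment above notes that unrecognized
# flags such as --num_beams=2 flow through `parse_numeric_n_bool_cl_kwargs`
# into `model.generate`. That helper lives in the local `utils` module; this
# minimal stand-in only sketches the assumed contract for integer values.
def _parse_unknown_demo(unknown):
    parsed = {}
    for tok in unknown:
        key, _, val = tok.lstrip("-").partition("=")
        parsed[key] = int(val) if val.isdigit() else val
    return parsed


assert _parse_unknown_demo(["--num_beams=2"]) == {"num_beams": 2}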
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
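

# --- Usage sketch (added): in `datasets`, this formatter backs
# `Dataset.with_format("jax")`; rows, columns, and batches then come back as
# jax.Array values. Kept as a comment because it needs a real dataset:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   row = ds[0]  # {"x": jax.Array of shape (2,) and dtype float32}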
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
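

# --- Illustrative sketch (added): `_LazyModule` defers the heavy torch imports
# above until an attribute is first accessed. A minimal stand-in with the same
# idea (PEP 562 module-level __getattr__); the code below is hypothetical, not
# the actual transformers implementation:
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, exported in _import_structure.items():
#           if name in exported:
#               module = importlib.import_module(f".{submodule}", __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")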
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion; the VQ-VAE is optional (latent audio diffusion)."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        """Generate a mel-spectrogram image and the corresponding audio."""
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors by angle fraction alpha."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
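

# --- Illustrative check (added): at alpha=0 the `slerp` above returns x0 and
# at alpha=1 it returns x1, since sin((1 - 0) * theta) / sin(theta) == 1 for
# orthogonal unit vectors (theta = pi / 2).
_x0 = torch.tensor([1.0, 0.0])
_x1 = torch.tensor([0.0, 1.0])
assert torch.allclose(AudioDiffusionPipeline.slerp(_x0, _x1, 0.0), _x0, atol=1e-6)
assert torch.allclose(AudioDiffusionPipeline.slerp(_x0, _x1, 1.0), _x1, atol=1e-6)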
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    """Applies each (tf_name, hf_name) substitution in order to a TF variable name."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
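

# --- Illustrative check (added): how the pattern lists compose. The input is
# a TF-style variable name consistent with the patterns above (illustrative,
# not copied from a real checkpoint); each (tf, hf) pair is applied in order.
assert (
    rename_state_dict_key("pegasus/decoder/layer_0/attention/self/query/kernel", DECODER_PATTERNS)
    == "model.decoder.layers.0.self_attn.q_proj.weight"
)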
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """
    Return set of symbol pairs in a word. word is represented as a tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
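

# --- Illustrative check (added): `get_pairs` returns the set of adjacent
# symbol pairs that BPE merge ranking is computed over.
assert get_pairs("low") == {("l", "o"), ("o", "w")}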
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text, **kwargs):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an index (integer) using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a list of output tokens into a single string."""
        # combine tokens
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
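

# --- Illustrative check (added): decoding glues "@@ "-suffixed subwords back
# together, mirroring `convert_tokens_to_string` above without needing a
# vocab file.
def _join_bpe_demo(tokens):
    string = " ".join(tokens)
    return "".join(string.split(BPE_TOKEN_VOCAB))


assert _join_bpe_demo(["hel@@", "lo", "world"]) == "hello world"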
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_lowerCAmelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_lowerCAmelCase : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,revision='onnx' ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowercase__ )
_lowerCAmelCase : Dict = '''A red cat sitting on a park bench'''
_lowerCAmelCase : Tuple = np.random.RandomState(0 )
_lowerCAmelCase : Dict = pipe(
prompt=lowercase__ ,image=lowercase__ ,mask_image=lowercase__ ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowercase__ ,output_type='np' ,)
_lowerCAmelCase : Union[str, Any] = output.images
_lowerCAmelCase : List[str] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[Any] = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_lowerCAmelCase : str = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,subfolder='scheduler' ,revision='onnx' )
_lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,revision='onnx' ,scheduler=lowercase__ ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowercase__ )
_lowerCAmelCase : Dict = '''A red cat sitting on a park bench'''
_lowerCAmelCase : int = np.random.RandomState(0 )
_lowerCAmelCase : int = pipe(
prompt=lowercase__ ,image=lowercase__ ,mask_image=lowercase__ ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=lowercase__ ,output_type='np' ,)
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Dict = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[int] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
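        # Outer product of the 1-D query and key padding masks gives a
        # (batch, query_len, key_len) mask; the final unsqueeze adds the broadcast head dim.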
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
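        # (hypothetical illustration: position_ids = torch.arange(seq_length) + encoder_seq_length)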
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        # gated-GELU feed-forward (as in T5's DenseGatedActDense): the two input
        # projections must be distinct; one is activated, the other stays linear.
        hidden_gelu = self.act(self.wi_0(_A ) )
        hidden_linear = self.wi_1(_A )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
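        # T5-style RMSNorm: scale by the root mean square only; no mean subtraction and no bias.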
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
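        # Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))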
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
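        # FiLM (feature-wise linear modulation): predict a per-feature (scale, shift)
        # pair from the conditioning embedding and apply x * (1 + scale) + shift.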
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
| 16 | 0 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x , mu = 0.0 , sigma = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
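    # Minimal usage sketch (illustrative): the standard normal density peaks
    # at x = mu with value 1 / sqrt(2 * pi), roughly 0.3989.
    print(gaussian(0.0))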
| 703 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
        # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 0 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t ):
'''simple docstring'''
    t = int(t )
    h, m, s = t // 3600, (t // 60) % 60, t % 60
return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def html_progress_bar(value , total , prefix , label , width=300 ):
'''simple docstring'''
return f"""\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n """
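# For example, html_progress_bar(3, 10, "Train", "3/10 00:05 < 00:12") renders a
# 300 px wide HTML <progress> element with the prefix before the bar and the label after it.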
def text_to_html_table(items ):
'''simple docstring'''
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = f"""{elt:.6f}""" if isinstance(elt , float ) else str(elt )
html_code += f""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class __UpperCamelCase :
_UpperCAmelCase = 5
_UpperCAmelCase = 0.2
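    # The two class attributes above are a warmup count (the first 5 calls always
    # render) and the minimum delay in seconds between display refreshes.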
def __init__( self ,_A ,_A = None ,_A = True ,_A = None ,_A = 300 ,):
'''simple docstring'''
_lowerCAmelCase : Dict = total
_lowerCAmelCase : Tuple = '''''' if prefix is None else prefix
_lowerCAmelCase : Tuple = leave
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : int = width
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : str = None
_lowerCAmelCase : List[Any] = None
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = value
if comment is not None:
_lowerCAmelCase : int = comment
if self.last_value is None:
_lowerCAmelCase : Dict = time.time()
_lowerCAmelCase : List[str] = value
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Tuple = self.warmup
_lowerCAmelCase : Dict = 1
self.update_bar(__snake_case )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for ,self.total ):
if self.first_calls > 0:
self.first_calls -= 1
_lowerCAmelCase : Dict = time.time()
_lowerCAmelCase : int = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
_lowerCAmelCase : Optional[int] = self.elapsed_time / (value - self.start_value)
else:
_lowerCAmelCase : Optional[int] = None
if value >= self.total:
_lowerCAmelCase : List[str] = self.total
_lowerCAmelCase : List[str] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
_lowerCAmelCase : List[str] = self.average_time_per_item * (self.total - value)
self.update_bar(__snake_case )
_lowerCAmelCase : Optional[Any] = value
_lowerCAmelCase : Tuple = current_time
if self.average_time_per_item is None:
_lowerCAmelCase : Optional[int] = 1
else:
_lowerCAmelCase : Union[str, Any] = max(int(self.update_every / self.average_time_per_item ) ,1 )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ''' ''' * (len(str(self.total ) ) - len(str(__snake_case ) )) + str(__snake_case )
if self.elapsed_time is None:
_lowerCAmelCase : str = F"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
_lowerCAmelCase : Optional[Any] = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
_lowerCAmelCase : str = (
F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
F""" {format_time(self.predicted_remaining )}"""
)
self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
self.display()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
_lowerCAmelCase : Union[str, Any] = disp.display(disp.HTML(self.html_code ) ,display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self ,_A ,_A=None ):
'''simple docstring'''
super().__init__(__snake_case )
_lowerCAmelCase : List[Any] = None if column_names is None else [column_names]
_lowerCAmelCase : Optional[int] = None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
_lowerCAmelCase : List[Any] = disp.display(disp.HTML(self.html_code ) ,display_id=__snake_case )
else:
self.output.update(disp.HTML(self.html_code ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.inner_table is None:
_lowerCAmelCase : Union[str, Any] = [list(values.keys() ), list(values.values() )]
else:
_lowerCAmelCase : Optional[int] = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__snake_case )
_lowerCAmelCase : List[Any] = columns
self.inner_table.append([values[c] for c in columns] )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=300 ):
'''simple docstring'''
_lowerCAmelCase : Any = NotebookProgressBar(__snake_case ,prefix=__snake_case ,parent=self ,width=__snake_case )
return self.child_bar
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = None
self.display()
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : int = None
_lowerCAmelCase : Optional[int] = False
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : int = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : Tuple = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
_lowerCAmelCase : Optional[Any] = NotebookTrainingTracker(state.max_steps ,__snake_case )
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 ,comment=F"""Epoch {epoch}/{state.num_train_epochs}""" ,force_update=self._force_next_update ,)
_lowerCAmelCase : Optional[int] = False
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=None ,**_A ):
'''simple docstring'''
if not has_length(__snake_case ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
_lowerCAmelCase : List[str] = self.training_tracker.add_child(len(__snake_case ) )
else:
_lowerCAmelCase : str = NotebookProgressBar(len(__snake_case ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
_lowerCAmelCase : List[str] = None
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=None ,**_A ):
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
_lowerCAmelCase : Tuple = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily "Step" since we're not in the epoch eval strategy
_lowerCAmelCase : Dict = state.global_step
self.training_tracker.write_line(__snake_case )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A=None ,**_A ):
'''simple docstring'''
if self.training_tracker is not None:
_lowerCAmelCase : Optional[Any] = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
_lowerCAmelCase : Any = log['''loss''']
break
if self.first_column == "Epoch":
_lowerCAmelCase : Tuple = int(state.epoch )
else:
_lowerCAmelCase : Optional[int] = state.global_step
_lowerCAmelCase : Tuple = '''eval'''
for k in metrics:
if k.endswith('_loss' ):
_lowerCAmelCase : Union[str, Any] = re.sub(r'\_loss$' ,'' ,__snake_case )
_lowerCAmelCase : List[Any] = metrics.pop('total_flos' ,__snake_case )
_lowerCAmelCase : Union[str, Any] = metrics.pop('epoch' ,__snake_case )
_lowerCAmelCase : List[Any] = metrics.pop(F"""{metric_key_prefix}_runtime""" ,__snake_case )
_lowerCAmelCase : Dict = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" ,__snake_case )
_lowerCAmelCase : List[Any] = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" ,__snake_case )
_lowerCAmelCase : List[str] = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" ,__snake_case )
for k, v in metrics.items():
if k == F"""{metric_key_prefix}_loss""":
_lowerCAmelCase : List[str] = v
else:
_lowerCAmelCase : Any = k.split('_' )
_lowerCAmelCase : Optional[Any] = ''' '''.join([part.capitalize() for part in splits[1:]] )
_lowerCAmelCase : Union[str, Any] = v
self.training_tracker.write_line(__snake_case )
self.training_tracker.remove_child()
_lowerCAmelCase : Dict = None
# Evaluation takes a long time so we should force the next update.
_lowerCAmelCase : List[Any] = True
def __lowerCamelCase ( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
self.training_tracker.update(
state.global_step ,comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" ,force_update=__snake_case )
_lowerCAmelCase : Any = None
| 704 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix , vector ):
    '''simple docstring'''
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col] ), row2) for row2 in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1 , size ):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1 , size + 1 ):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col , size + 1 ):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(data_points ):
    '''simple docstring'''
    size: int = len(data_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(data_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
            vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
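# Illustrative check (not part of the original): fitting only the first two terms
# of u(n) = n**3, i.e. u(1) = 1 and u(2) = 8, yields the line 7 * n - 6, whose first
# incorrect term is poly(3) = 15, the OP(2, n) example from Project Euler problem 101.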
def question_function(variable ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func = question_function , order = 10 ):
    '''simple docstring'''
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = F'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = F'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = F'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = F'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = F'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = F'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = F'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = F'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = F'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = F'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = F'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = """mid_block.attentions.0."""
sd_mid_atn_prefix = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = F'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = F'''middle_block.{2*j}.'''
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict ):
    '''simple docstring'''
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
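# Illustrative key translation produced by the maps above: the diffusers key
# "down_blocks.0.resnets.0.norm1.weight" becomes the SD key
# "input_blocks.1.0.in_layers.0.weight".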
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = F'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = F'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = F'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = F'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = F'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = F'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = F'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = F'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = F'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = F'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w ):
'''simple docstring'''
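    # Diffusers stores the VAE attention projections as nn.Linear (2-D weights);
    # SD checkpoints expect 1x1 conv kernels, so append two singleton dimensions.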
return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict(vae_state_dict ):
    '''simple docstring'''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"""q""": 0, """k""": 1, """v""": 2}
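# SD v2 checkpoints fuse q/k/v into single in_proj tensors, so the converter below
# collects the three separate diffusers projections per layer and concatenates them
# in (q, k, v) order using the index map above.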
def convert_text_enc_state_dict_vaa(text_enc_dict ):
    '''simple docstring'''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight' )
            or k.endswith('.self_attn.k_proj.weight' )
            or k.endswith('.self_attn.v_proj.weight' )
        ):
            k_pre = k[: -len('.q_proj.weight' )]
            k_code = k[-len('q_proj.weight' )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias' )
            or k.endswith('.self_attn.k_proj.bias' )
            or k.endswith('.self_attn.v_proj.bias' )
        ):
            k_pre = k[: -len('.q_proj.bias' )]
            k_code = k[-len('q_proj.bias' )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors )
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict ):
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
    vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
    text_enc_path = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
    # Load the models from safetensors when available; otherwise fall back to the PyTorch .bin files
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="""cpu""")
    else:
        unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
        unet_state_dict = torch.load(unet_path, map_location="""cpu""")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="""cpu""")
    else:
        vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
        vae_state_dict = torch.load(vae_path, map_location="""cpu""")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="""cpu""")
    else:
        text_enc_path = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
        text_enc_dict = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 705 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp ):
'''simple docstring'''
if (
        (cp >= 0X4E00 and cp <= 0X9FFF)  # CJK Unified Ideographs
        or (cp >= 0X3400 and cp <= 0X4DBF)  # CJK Unified Ideographs Extension A
        or (cp >= 0X20000 and cp <= 0X2A6DF)  # Extension B
        or (cp >= 0X2A700 and cp <= 0X2B73F)  # Extension C
        or (cp >= 0X2B740 and cp <= 0X2B81F)  # Extension D
        or (cp >= 0X2B820 and cp <= 0X2CEAF)  # Extension E
        or (cp >= 0XF900 and cp <= 0XFAFF)  # CJK Compatibility Ideographs
        or (cp >= 0X2F800 and cp <= 0X2FA1F)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def is_chinese(word ):
    '''simple docstring'''
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word(tokens ):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens , chinese_word_set ):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_len = min(end - start , max_word_len )
            for i in range(max_len , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines , ltp_tokenizer , bert_tokenizer ):
    '''simple docstring'''
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save positions of Chinese sub-words that start with "##", which means they are part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args ):
    '''simple docstring'''
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : Tuple = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : str = TFAutoModel.from_pretrained(__a ,from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : List[str] = AutoModel.from_pretrained(__a ,from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : Dict = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : Optional[int] = TFAutoModelForPreTraining.from_pretrained(__a ,from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : Dict = AutoModelForPreTraining.from_pretrained(__a ,from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(__a ,from_pt=__a )
_lowerCAmelCase : int = TFAutoModelForCausalLM.from_pretrained(
__a ,output_loading_info=__a ,from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(__a ,from_tf=__a )
_lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained(
__a ,output_loading_info=__a ,from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(__a ,from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : List[str] = AutoModelWithLMHead.from_pretrained(__a ,from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(__a ,from_pt=__a )
_lowerCAmelCase : List[str] = TFAutoModelForMaskedLM.from_pretrained(
__a ,output_loading_info=__a ,from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : List[Any] = AutoModelForMaskedLM.from_pretrained(__a ,from_tf=__a )
_lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
__a ,output_loading_info=__a ,from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : int = TFAutoModelForSeqaSeqLM.from_pretrained(__a ,from_pt=__a )
_lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
__a ,output_loading_info=__a ,from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : Tuple = AutoModelForSeqaSeqLM.from_pretrained(__a ,from_tf=__a )
_lowerCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
__a ,output_loading_info=__a ,from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : str = TFAutoModelForSequenceClassification.from_pretrained(__a ,from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__a ,from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_lowerCAmelCase : int = AutoConfig.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : List[Any] = TFAutoModelForQuestionAnswering.from_pretrained(__a ,from_pt=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
_lowerCAmelCase : str = AutoModelForQuestionAnswering.from_pretrained(__a ,from_tf=__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a ,__a )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFAutoModelWithLMHead.from_pretrained(__a ,from_pt=__a )
self.assertIsInstance(__a ,__a )
self.assertEqual(model.num_parameters() ,1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__a ) ,1_4410 )
_lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(__a ,from_tf=__a )
self.assertIsInstance(__a ,__a )
self.assertEqual(model.num_parameters() ,1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__a ) ,1_4410 )
    def test_from_identifier_from_model_type(self):
        '''simple docstring'''
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
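# Minimal interop sketch of the cross-framework loaders exercised above
# (assumption: both the PyTorch and TensorFlow backends are installed and the
# Hub checkpoint name below is reachable). `from_pt`/`from_tf` convert the
# other framework's weights on the fly.
def _example_tf_pt_round_trip():
    tf_model = TFAutoModelForMaskedLM.from_pretrained("bert-base-uncased", from_pt=True)
    pt_model = AutoModelForMaskedLM.from_pretrained("bert-base-uncased", from_tf=True)
    return tf_model, pt_model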
| 706 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32,)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False,)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D'), up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D'), latent_channels=4,)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vqvae': vae,
            'bert': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference_text2img(self):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        '''simple docstring'''
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm_default_ddim(self):
        '''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        '''simple docstring'''
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm_default_ddim(self):
        '''simple docstring'''
        pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256').to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy')
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
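# Usage sketch of the pipeline under test (assumption: Hub access for the
# checkpoint; a GPU is optional but much faster):
def _example_ldm_text2img():
    pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256')
    return pipe('A painting of a squirrel eating a burger', num_inference_steps=50).images[0]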
| 16 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None,):
        '''simple docstring'''
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(','):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        '''simple docstring'''
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[torch.Generator] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True,):
        '''simple docstring'''
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype,)
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == 'mps'
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
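# Usage sketch (assumption: this mirrors diffusers' DiTPipeline, whose public
# checkpoint is "facebook/DiT-XL-2-256"; Hub access required):
def _example_dit():
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    class_ids = pipe.get_label_ids(["white shark"])  # label lookup defined above
    return pipe(class_labels=class_ids, num_inference_steps=25).images[0]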
| 707 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
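# Round-trip sketch: Ascii85 packs 4 input bytes into 5 printable characters,
# so decoding an encoded string must return the original text.
assert base85_decode(base85_encode("Hello World!")) == "Hello World!"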
| 16 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        '''simple docstring'''
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1,))
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,))
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear', beta_start=0.00085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=False, steps_offset=1,)
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs.update({'image_embeds': None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        '''simple docstring'''
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        '''simple docstring'''
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed',)
    def test_xformers_attention_forwardGenerator_pass(self):
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy')
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(init_image, 'anime turle', generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy')
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe(init_image, 'anime turle', generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img', torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image, 'anime turtle', num_inference_steps=2, output_type='np',)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
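# Usage sketch (assumption: CUDA GPU with fp16 support and Hub access):
def _example_stable_unclip_img2img(init_image):
    pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        'fusing/stable-unclip-2-1-l-img2img', torch_dtype=torch.float16).to('cuda')
    return pipe(init_image, 'anime turtle', output_type='np').images[0]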
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs,):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
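# Usage sketch (assumption: Hub access for the checkpoint name):
def _example_bert_fast_tokenizer():
    tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
    return tok("hello world", return_tensors="np")["input_ids"]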
| 16 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    '''simple docstring'''
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''') | 709 |
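# Sanity sketch: for a row of length 4 the recurrence counts the compositions
# of 4 into parts of size 1-4, i.e. c(4) = c(3) + c(2) + c(1) + c(0) = 4 + 2 + 1 + 1 = 8.
assert solution(4) == 8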
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
    @require_multi_gpu
    def test_multi_gpu(self):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""")
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(F"""Command: {cmd}""")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        '''simple docstring'''
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""")
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
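# Single-process illustration of the padding contract checked above (assumption:
# plain torch, no accelerate): right-padding a (2, 10) tensor to 3 rows of zeros.
import torch.nn.functional as F
_t = torch.ones(2, 10)
_padded = F.pad(_t, (0, 0, 0, 1))  # pad last dim by (0, 0), first dim by (0, 1)
assert _padded.shape == (3, 10) and bool(torch.all(_padded[2] == 0))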
| 16 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __UpperCamelCase ( unittest.TestCase ):
    def test_cpu(self):
'''simple docstring'''
debug_launcher(test_script.main )
    def test_ops(self):
'''simple docstring'''
debug_launcher(test_ops.main )
| 710 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence):
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(sequence, current_sequence, index, index_used):
    '''simple docstring'''
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
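# Cross-check sketch: the backtracking above prints exactly the n! orderings
# that itertools.permutations yields (output order aside).
import itertools

assert len(list(itertools.permutations([3, 1, 2, 4]))) == 24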
| 16 | 0 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
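# Exact cross-check for small limits (assumption: Project Euler 72 semantics --
# count reduced proper fractions n/d with d <= limit). O(limit^2), demo only:
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4 sums to 21, matching the sieve above.
def solution_exact(limit: int = 8) -> int:
    from math import gcd

    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)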
| 711 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.')
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name, log_level=None):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
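# Usage sketch (assumption: Accelerator() falls back to single-process CPU,
# which is enough to initialize PartialState before logging):
if __name__ == "__main__":
    from accelerate import Accelerator

    Accelerator()
    example_logger = get_logger(__name__, log_level="INFO")
    example_logger.info("printed on the main process only")
    example_logger.info("printed on every process", main_process_only=False)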
| 16 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs,) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                F"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.""")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4,):
        '''simple docstring'''
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage,)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]),))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans,):
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"""Span is too long: {length} > {max_answer_length}""")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 16 | 0 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        '''simple docstring'''
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained(self, save_directory):
        '''simple docstring'''
        if os.path.isfile(save_directory):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, 'question_encoder_tokenizer')
        generator_path = os.path.join(save_directory, 'generator_tokenizer')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('config', None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder='question_encoder_tokenizer')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder='generator_tokenizer')
        return cls(question_encoder=question_encoder, generator=generator)
    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        '''simple docstring'''
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        '''simple docstring'''
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(self, src_texts, tgt_texts=None, max_length=None, max_target_length=None, padding="longest", return_tensors=None, truncation=True, **kwargs,) -> BatchEncoding:
        '''simple docstring'''
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details', FutureWarning,)
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs,)
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs,)
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
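# Usage sketch (assumption: Hub access; "facebook/rag-token-nq" ships both
# sub-tokenizers in the subfolders expected by from_pretrained above):
def _example_rag_tokenizer():
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    return tokenizer(["who sings what is love"], return_tensors="pt")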
| 713 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        '''simple docstring'''
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type='fourier', mid_block_type='UNetMidBlock1D', down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'), up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'),)
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
    def test_dance_diffusion(self):
        '''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local( self ):
        '''simple docstring'''
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        '''simple docstring'''
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    @skip_mps
    def test_save_load_optional_components( self ):
        '''simple docstring'''
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class PipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion( self ):
        '''simple docstring'''
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2

    def test_dance_diffusion_fp16( self ):
        '''simple docstring'''
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
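if __name__ == "__main__":
    # Hedged usage sketch, not part of the original test module: mirrors the slow
    # test above outside of unittest. Downloads the harmonai/maestro-150k weights.
    pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
    pipe = pipe.to(torch_device)
    output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
    print(output.audios.shape)  # expected (1, 2, pipe.unet.sample_size)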
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits ,classical_bits ):
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits ,classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] ,[0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit ,simulator ,shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
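# Hedged sanity note, not part of the original script: the circuit applies no gates
# before measuring, so qubit 0 stays in |0> and all 1000 shots should report '0',
# i.e. single_qubit_measure(1, 1) is expected to return {'0': 1000}.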
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config( self ,**kwargs ):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'solver_type': 'bh2',
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self ,time_step=0 ,**config ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' ,None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step ,time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual ,t ,output ,**kwargs ).prev_sample
                new_output = new_scheduler.step(residual ,t ,new_output ,**kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def check_over_forward( self ,time_step=0 ,**forward_kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' ,None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
            new_output = new_scheduler.step(residual ,time_step ,sample ,**kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self ,scheduler=None ,**config ):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ).prev_sample
        return sample
    def test_step_shape( self ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' ,None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler ,'set_timesteps' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler ,'set_timesteps' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual ,time_step_0 ,sample ,**kwargs ).prev_sample
            output_1 = scheduler.step(residual ,time_step_1 ,sample ,**kwargs ).prev_sample
            self.assertEqual(output_0.shape ,sample.shape )
            self.assertEqual(output_0.shape ,output_1.shape )
    def test_switch( self ):
        '''simple docstring'''
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_thresholding( self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,solver_order=order ,solver_type=solver_type ,)
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ):
        '''simple docstring'''
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,)
                    sample = self.full_loop(
                        solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,)
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ):
        '''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_inference_steps( self ):
        '''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps ,time_step=0 )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type='v_prediction' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
    def test_fp16_support( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True ,dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ).prev_sample
        assert sample.dtype == torch.float16
    def test_unique_timesteps( self ,**config ):
        '''simple docstring'''
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
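if __name__ == "__main__":
    # Hedged usage sketch, not part of the original test module: the scheduler is
    # normally swapped into a diffusers pipeline via from_config. The checkpoint
    # name is an assumption used purely for illustration and triggers a download.
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]
    image.save("unipc_sample.png")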
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k ):
    '''simple docstring'''
    return getitem, k


def _set(k ,v ):
    '''simple docstring'''
    return setitem, k, v


def _del(k ):
    '''simple docstring'''
    return delitem, k


def _run_operation(obj ,fun ,*args ):
    '''simple docstring'''
    try:
        return fun(obj ,*args ), None
    except Exception as e:
        return None, e
_add_items = (
    _set("""key_a""", """val_a"""),
    _set("""key_b""", """val_b"""),
)
_overwrite_items = [
    _set("""key_a""", """val_a"""),
    _set("""key_a""", """val_b"""),
]
_delete_items = [
    _set("""key_a""", """val_a"""),
    _set("""key_b""", """val_b"""),
    _del("""key_a"""),
    _del("""key_b"""),
    _set("""key_a""", """val_a"""),
    _del("""key_a"""),
]
_access_absent_items = [
    _get("""key_a"""),
    _del("""key_a"""),
    _set("""key_a""", """val_a"""),
    _del("""key_a"""),
    _del("""key_a"""),
    _get("""key_a"""),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations ):
    '''simple docstring'''
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my ,fun ,*args )
        py_res, py_exc = _run_operation(py ,fun ,*args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )


def test_no_new_methods_matching_python_dict():
    '''simple docstring'''
    def is_public(name ) -> bool:
        return not name.startswith('_' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
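if __name__ == "__main__":
    # Hedged usage sketch, not part of the original test module: HashMap supports
    # the same basic mapping protocol that the parametrized operations exercise.
    demo = HashMap(initial_block_size=4)
    demo["key_a"] = "val_a"
    assert demo["key_a"] == "val_a"
    del demo["key_a"]
    print("HashMap handles set/get/delete like a dict")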
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key ):
    '''simple docstring'''
    if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.1.bias' ,'.conv1d_1.bias' )
    elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.1.weight' ,'.conv1d_1.weight' )
    elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.3.bias' ,'.conv1d_2.bias' )
    elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.3.weight' ,'.conv1d_2.weight' )
    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0' ,'conditioner_blocks' )
    if "prime_prior" in key:
        key = key.replace('prime_prior' ,'encoder' )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.' ,'.' )
    if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k' ,'.codebook' )
    if "y_emb." in key:
        return key.replace('y_emb.' ,'metadata_embedding.' )
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb' ,'embed_tokens' )
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln' ,'encoder.final_layer_norm' )
    if ".ln" in key:
        return key.replace('.ln' ,'.layer_norm' )
    if "_ln" in key:
        return key.replace('_ln' ,'_layer_norm' )
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj' ,'encoder.proj_in' )
    if "prime_x_out" in key:
        return key.replace('prime_x_out' ,'encoder.lm_head' )
    if "prior.x_out" in key:
        return key.replace('x_out' ,'fc_proj_out' )
    if "x_emb" in key:
        return key.replace('x_emb' ,'embed_tokens' )
    return key
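# Hedged worked examples, not part of the original script, traced by hand through
# replace_key above:
#     replace_key('prior.x_out.weight')  -> 'prior.fc_proj_out.weight'
#     replace_key('y_emb.weight')        -> 'metadata_embedding.weight'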
def fix_jukebox_keys(state_dict ,model_state_dict ,key_prefix ,mapping ):
    '''simple docstring'''
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_encoder_block_resnet = re.compile(
        R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_encoder_block_proj_out = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_conv_out = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_decoder_block_resnet = re.compile(
        R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_proj_in = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_conv_out = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
    re_prior_cond_resnet = re.compile(
        R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_proj_in = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key ,original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key ,original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key ,original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key ,original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key ,original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key ,original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key ,original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key ,original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key ,original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle missmatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None ,pytorch_dump_folder_path=None ):
    '''simple docstring'''
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
            r = requests.get(f"""{PREFIX}{file}""" ,allow_redirects=True )
            os.makedirs(f"""{pytorch_dump_folder_path}/""" ,exist_ok=True )
            open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ,'wb' ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split('/' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b' ):
                new_dic[k.replace('b' ,'bias' )] = old_dic[k]
            elif k.endswith('.w' ):
                new_dic[k.replace('w' ,'weight' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.' ,'.model.' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic ,model.state_dict() ,key_prefix ,mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f"""{pytorch_dump_folder_path}/mapping.json""" ,'w' ) as txtfile:
        json.dump(mapping ,txtfile )
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
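# Hedged usage note, not part of the original script: the conversion can also be
# driven programmatically, e.g.
#     convert_openai_checkpoint('jukebox-1b-lyrics', 'jukebox-1b-lyrics-converted')
# which first downloads the checkpoints listed in MODEL_MAPPING, then converts them.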
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type: Optional[str] = field(
        default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False ,metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels: Optional[str] = field(
        default=None ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
    max_seq_length: int = field(
        default=1_28 ,metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
    module = import_module('tasks' )
    try:
        token_classification_task_clazz = getattr(module ,model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
            f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fp16 ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' ,training_args )
# Set seed
set_seed(training_args.seed )
    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=num_labels ,id2label=label_map ,label2id={label: i for i, label in enumerate(labels )} ,cache_dir=model_args.cache_dir ,)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast ,)
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=config ,cache_dir=model_args.cache_dir ,)
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task ,data_dir=data_args.data_dir ,tokenizer=tokenizer ,labels=labels ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task ,data_dir=data_args.data_dir ,tokenizer=tokenizer ,labels=labels ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions ,label_ids ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions ,axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions ,p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list ,preds_list ),
            "precision": precision_score(out_label_list ,preds_list ),
            "recall": recall_score(out_label_list ,preds_list ),
            "f1": f1_score(out_label_list ,preds_list ),
        }
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer ,pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,compute_metrics=compute_metrics ,data_collator=data_collator ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir ,'eval_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file ,'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' ,key ,value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task ,data_dir=data_args.data_dir ,tokenizer=tokenizer ,labels=labels ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.test ,)
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions ,label_ids )
        output_test_results_file = os.path.join(training_args.output_dir ,'test_results.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_results_file ,'w' ) as writer:
                for key, value in metrics.items():
                    logger.info(' %s = %s' ,key ,value )
                    writer.write('%s = %s\n' % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir ,'test_predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file ,'w' ) as writer:
                with open(os.path.join(data_args.data_dir ,'test.txt' ) ,'r' ) as f:
                    token_classification_task.write_predictions_to_file(writer ,f ,preds_list )
    return results
def _mp_fn(index ):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
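# Hedged usage note, not part of the original script: main() also accepts a single
# JSON file of arguments (`python run_ner.py args.json`; the script file name is an
# assumption). The JSON keys mirror the dataclass fields above, e.g.
#     {"model_name_or_path": "bert-base-cased", "data_dir": "./data",
#      "output_dir": "./out", "do_train": true, "do_eval": true}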
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def username( self ):
        '''simple docstring'''
        return self.user_data["username"]

    @property
    def fullname( self ):
        '''simple docstring'''
        return self.user_data["full_name"]

    @property
    def biography( self ):
        '''simple docstring'''
        return self.user_data["biography"]

    @property
    def email( self ):
        '''simple docstring'''
        return self.user_data["business_email"]

    @property
    def website( self ):
        '''simple docstring'''
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ):
        '''simple docstring'''
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ):
        '''simple docstring'''
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ):
        '''simple docstring'''
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ):
        '''simple docstring'''
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ):
        '''simple docstring'''
        return self.user_data["is_verified"]

    @property
    def is_private( self ):
        '''simple docstring'''
        return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname ):
    '''simple docstring'''
    stem = fname.split(os.path.sep )[-1]
    return re.search(R'^(.*)_\d+\.jpg$' ,stem ).groups()[0]


class PetsDataset(Dataset ):
    def __init__( self ,file_names ,image_transform=None ,label_to_id=None ):
        '''simple docstring'''
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__( self ):
        '''simple docstring'''
        return len(self.file_names )

    def __getitem__( self ,idx ):
        '''simple docstring'''
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname )
        image = raw_image.convert('RGB' )
        if self.image_transform is not None:
            image = self.image_transform(image )
        label = extract_label(fname )
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function( config ,args ):
'''simple docstring'''
if args.with_tracking:
_lowerCAmelCase : Union[str, Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
_lowerCAmelCase : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : Any = config['lr']
_lowerCAmelCase : List[Any] = int(config['num_epochs'] )
_lowerCAmelCase : Any = int(config['seed'] )
_lowerCAmelCase : List[Any] = int(config['batch_size'] )
_lowerCAmelCase : Union[str, Any] = config['image_size']
if not isinstance(_lowercase , (list, tuple) ):
_lowerCAmelCase : List[Any] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
_lowerCAmelCase : Optional[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_lowerCAmelCase : List[str] = int(args.checkpointing_steps )
else:
raise ValueError(
f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
_lowerCAmelCase : Union[str, Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_lowerCAmelCase : int = os.path.split(_lowercase )[-1].split('.' )[0]
accelerator.init_trackers(_lowercase , _lowercase )
# Grab all the image filenames
_lowerCAmelCase : Union[str, Any] = [os.path.join(args.data_dir , _lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
_lowerCAmelCase : List[str] = [extract_label(_lowercase ) for fname in file_names]
_lowerCAmelCase : Tuple = list(set(_lowercase ) )
id_to_label.sort()
_lowerCAmelCase : int = {lbl: i for i, lbl in enumerate(_lowercase )}
# Set the seed before splitting the data.
np.random.seed(_lowercase )
torch.manual_seed(_lowercase )
torch.cuda.manual_seed_all(_lowercase )
# Split our filenames between train and validation
_lowerCAmelCase : Optional[int] = np.random.permutation(len(_lowercase ) )
_lowerCAmelCase : Dict = int(0.8 * len(_lowercase ) )
_lowerCAmelCase : Union[str, Any] = random_perm[:cut]
_lowerCAmelCase : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_lowerCAmelCase : Any = Compose([RandomResizedCrop(_lowercase , scale=(0.5, 1.0) ), ToTensor()] )
_lowerCAmelCase : List[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_lowercase , label_to_id=_lowercase )
# For evaluation, we use a deterministic Resize
_lowerCAmelCase : Tuple = Compose([Resize(_lowercase ), ToTensor()] )
_lowerCAmelCase : str = PetsDataset([file_names[i] for i in eval_split] , image_transform=_lowercase , label_to_id=_lowercase )
# Instantiate dataloaders.
_lowerCAmelCase : Tuple = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
_lowerCAmelCase : Any = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : int = create_model('resnet50d' , pretrained=_lowercase , num_classes=len(_lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase : str = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_lowerCAmelCase : str = False
for param in model.get_classifier().parameters():
_lowerCAmelCase : Dict = True
# We normalize the batches of images to be a bit faster.
_lowerCAmelCase : Optional[Any] = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
_lowerCAmelCase : Union[str, Any] = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_lowerCAmelCase : Dict = OneCycleLR(optimizer=_lowercase , max_lr=_lowercase , epochs=_lowercase , steps_per_epoch=len(_lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase : List[str] = 0
# We also need to keep track of the starting epoch so files are named properly
_lowerCAmelCase : Optional[Any] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase : Tuple = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_lowerCAmelCase : Optional[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_lowerCAmelCase : Optional[int] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_lowerCAmelCase : Optional[Any] = os.path.splitext(_lowercase )[0]
if "epoch" in training_difference:
_lowerCAmelCase : Tuple = int(training_difference.replace('epoch_' , '' ) ) + 1
_lowerCAmelCase : List[str] = None
else:
_lowerCAmelCase : Union[str, Any] = int(training_difference.replace('step_' , '' ) )
_lowerCAmelCase : List[str] = resume_step // len(_lowercase )
resume_step -= starting_epoch * len(_lowercase )
# Now we train the model
for epoch in range(_lowercase , _lowercase ):
model.train()
if args.with_tracking:
_lowerCAmelCase : List[str] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_lowerCAmelCase : str = accelerator.skip_first_batches(_lowercase , _lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_lowerCAmelCase : Dict = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCAmelCase : int = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCAmelCase : int = (batch['image'] - mean) / std
_lowerCAmelCase : Optional[int] = model(_lowercase )
_lowerCAmelCase : List[Any] = torch.nn.functional.cross_entropy(_lowercase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_lowercase , _lowercase ):
_lowerCAmelCase : List[str] = f"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_lowerCAmelCase : Any = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
model.eval()
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Dict = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCAmelCase : Optional[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCAmelCase : Optional[int] = (batch['image'] - mean) / std
with torch.no_grad():
_lowerCAmelCase : int = model(_lowercase )
_lowerCAmelCase : int = outputs.argmax(dim=-1 )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch['label']) )
_lowerCAmelCase : str = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_lowerCAmelCase : Optional[int] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(_lowercase ),
'epoch': epoch,
} , step=_lowercase , )
if checkpointing_steps == "epoch":
_lowerCAmelCase : Optional[int] = f"""epoch_{epoch}"""
if args.output_dir is not None:
_lowerCAmelCase : Optional[int] = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
if args.with_tracking:
accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument('--data_dir' ,required=True ,help='The data folder on disk.' )
    parser.add_argument('--fp16' ,action='store_true' ,help='If passed, will use FP16 training.' )
    parser.add_argument(
        '--mixed_precision' ,type=str ,default=None ,choices=['no', 'fp16', 'bf16', 'fp8'] ,help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. '
        'and an Nvidia Ampere GPU.' ,)
    parser.add_argument('--cpu' ,action='store_true' ,help='If passed, will train on the CPU.' )
    parser.add_argument(
        '--checkpointing_steps' ,type=str ,default=None ,help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' ,)
    parser.add_argument(
        '--output_dir' ,type=str ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
    parser.add_argument(
        '--resume_from_checkpoint' ,type=str ,default=None ,help='If the training should continue from a checkpoint folder.' ,)
    parser.add_argument(
        '--with_tracking' ,action='store_true' ,help='Whether to load in all available experiment trackers from the environment and use them for logging.' ,)
    parser.add_argument(
        '--project_dir' ,type=str ,default='logs' ,help='Location on where to store experiment tracking logs and relevant project information' ,)
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config ,args )
if __name__ == "__main__":
main()
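# Hedged usage note, not part of the original script: this example is normally
# started through the accelerate CLI (the script file name is an assumption), e.g.
#     accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch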
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__( self ,vocab_file ,do_lower_case=False ,remove_space=True ,keep_accents=False ,bos_token="<s>" ,eos_token="</s>" ,unk_token="<unk>" ,sep_token="<sep>" ,pad_token="<pad>" ,cls_token="<cls>" ,mask_token="<mask>" ,additional_special_tokens=["<eop>", "<eod>"] ,sp_model_kwargs = None ,**kwargs ,):
        '''simple docstring'''
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case ,remove_space=remove_space ,keep_accents=keep_accents ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,additional_special_tokens=additional_special_tokens ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self ,d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self ,inputs ):
        '''simple docstring'''
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' ,outputs )
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self ,text ):
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text ,out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,'' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self ,token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self ,index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self ,tokens ):
        '''simple docstring'''
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE ,' ' ).strip()
        return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
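# A minimal, dependency-free sketch of the preprocessing the tokenizer above
# applies: whitespace collapsing, quote unification, optional NFKD accent
# stripping, and optional lowercasing. The function and argument names here
# are illustrative, not part of the original class.
import unicodedata
def normalize_text(text, remove_space=True, keep_accents=False, do_lower_case=True):
    out = " ".join(text.strip().split()) if remove_space else text
    out = out.replace("``", '"').replace("''", '"')
    if not keep_accents:
        out = unicodedata.normalize("NFKD", out)
        out = "".join(c for c in out if not unicodedata.combining(c))
    return out.lower() if do_lower_case else out
print(normalize_text("  Crème  ``brûlée''  "))  # -> 'creme "brulee"'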
| 16 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=13 ,_A=32 ,_A=3 ,_A=4 ,_A=[10, 20, 30, 40] ,_A=[2, 2, 3, 2] ,_A=True ,_A=True ,_A=37 ,_A="gelu" ,_A=10 ,_A=0.0_2 ,_A=["stage2", "stage3", "stage4"] ,_A=[2, 3, 4] ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : List[Any] = image_size
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : Any = num_stages
_lowerCAmelCase : Union[str, Any] = hidden_sizes
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Optional[Any] = num_labels
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Dict = out_features
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Any = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_lowercase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = ConvNextVaModel(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : Tuple = model(_lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = ConvNextVaForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : Dict = model(_lowercase ,labels=_lowercase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ConvNextVaBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : Optional[int] = model(_lowercase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Tuple = ConvNextVaBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowerCAmelCase : List[str] = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.prepare_config_and_inputs()
_lowerCAmelCase : Optional[int] = config_and_inputs
_lowerCAmelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : str = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = ConvNextVaModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self ,config_class=_lowercase ,has_text_modality=_lowercase ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCAmelCase : List[str] = True
if model_class.__name__ in [
*get_values(_lowercase ),
*get_values(_lowercase ),
]:
continue
_lowerCAmelCase : Optional[int] = model_class(_lowercase )
model.to(_lowercase )
model.train()
_lowerCAmelCase : Tuple = self._prepare_for_class(_lowercase ,_lowercase ,return_labels=_lowercase )
_lowerCAmelCase : Dict = model(**_lowercase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_with_labels()
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : List[Any] = True
if (
model_class.__name__
in [*get_values(_lowercase ), *get_values(_lowercase )]
or not model_class.supports_gradient_checkpointing
):
continue
_lowerCAmelCase : Optional[int] = model_class(_lowercase )
model.to(_lowercase )
model.gradient_checkpointing_enable()
model.train()
_lowerCAmelCase : Optional[int] = self._prepare_for_class(_lowercase ,_lowercase ,return_labels=_lowercase )
_lowerCAmelCase : Any = model(**_lowercase ).loss
loss.backward()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(_lowercase )
_lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[str] = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : Any = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**self._prepare_for_class(_lowercase ,_lowercase ) )
_lowerCAmelCase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_lowercase ,_lowercase ,_lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_lowercase ,_lowercase ,_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = ConvNextVaModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_lowercase )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Union[str, Any] = prepare_img()
_lowerCAmelCase : Union[str, Any] = preprocessor(images=_lowercase ,return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(**_lowercase )
# verify the logits
_lowerCAmelCase : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_lowercase )
_lowerCAmelCase : Any = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowercase ,atol=1E-4 ) )
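# The tests above follow a standard shape-assertion pattern: build a small
# model, run a forward pass in eval mode under no_grad, and compare output
# shapes. A self-contained sketch of that pattern with a toy conv backbone
# (hypothetical, not the ConvNeXt V2 architecture):
import unittest
import torch
from torch import nn
class ToyBackbone(nn.Module):
    def __init__(self, channels=8):
        super().__init__()
        self.conv = nn.Conv2d(3, channels, kernel_size=4, stride=4)  # H, W -> H/4, W/4
    def forward(self, x):
        return self.conv(x)
class ToyBackboneTest(unittest.TestCase):
    def test_output_shape(self):
        model = ToyBackbone(channels=8).eval()
        with torch.no_grad():
            out = model(torch.rand(2, 3, 32, 32))
        self.assertEqual(out.shape, (2, 8, 8, 8))
if __name__ == "__main__":
    unittest.main()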
| 718 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
# add 48 0-ed integers
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
        _lowerCAmelCase : List[str] = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
        _lowerCAmelCase : int = bytes(_lowerCAmelCase , 'utf-8' )
    print(SHAaaa(_lowerCAmelCase ).hash )
if __name__ == "__main__":
main()
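# Two building blocks of the class above, restated plainly and checked against
# hashlib: the padding step appends 0x80, zero-pads so eight bytes remain to a
# 64-byte boundary, then appends the bit length big-endian; rotr is the 32-bit
# right rotation used throughout the compression loop.
import hashlib
import struct
def sha256_pad(data: bytes) -> bytes:
    padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + padding + struct.pack(">Q", len(data) * 8)
def rotr(value: int, rotations: int) -> int:
    return 0xFFFF_FFFF & ((value << (32 - rotations)) | (value >> rotations))
assert len(sha256_pad(b"abc")) % 64 == 0
assert rotr(1, 1) == 0x8000_0000
print(hashlib.sha256(b"abc").hexdigest())  # reference digest for the same input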
| 16 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
_lowerCAmelCase = input("""Enter image url: """).strip()
print(F'''Downloading image from {url} ...''')
_lowerCAmelCase = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
_lowerCAmelCase = soup.find("""meta""", {"""property""": """og:image"""})['content']
_lowerCAmelCase = requests.get(image_url).content
_lowerCAmelCase = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
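# A reusable version of the scraping step above. The `bsa` import in the
# snippet follows this dump's digit-to-letter renaming; the real package is
# bs4 (Beautiful Soup 4). The helper name below is illustrative.
import requests
from bs4 import BeautifulSoup
def get_og_image_url(page_url: str) -> str:
    soup = BeautifulSoup(requests.get(page_url, timeout=10).content, "html.parser")
    tag = soup.find("meta", {"property": "og:image"})
    if tag is None or not tag.get("content"):
        raise ValueError("page has no og:image meta tag")
    return tag["content"]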
| 719 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        _lowerCAmelCase : Union[str, Any] = key or (lambda x : x)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
    '''simple docstring'''
    import doctest
    doctest.testmod()
if __name__ == "__main__":
    lowerCamelCase__()
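# For comparison with the key-based heap above: the standard library achieves
# the same ordering by storing (key(item), item) pairs in a heapq list, though
# it lacks the O(log n) update/delete-by-item support that pos_map enables.
import heapq
words = ["pear", "fig", "banana"]
heap = [(len(w), w) for w in words]  # key = word length
heapq.heapify(heap)
while heap:
    length, word = heapq.heappop(heap)
    print(length, word)  # 3 fig, then 4 pear, then 6 banana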
| 16 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : List[str] = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = True, True
_lowerCAmelCase : Any = dfs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return path
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = 0
_lowerCAmelCase : Union[str, Any] = -1
for i in range(lowerCAmelCase__ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
_lowerCAmelCase : Union[str, Any] = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = check_circuit_or_path(lowerCAmelCase__ , lowerCAmelCase__ )
if check == 3:
print('graph is not Eulerian' )
print('no path' )
return
_lowerCAmelCase : Union[str, Any] = 1
if check == 2:
_lowerCAmelCase : Union[str, Any] = odd_node
print('graph has a Euler path' )
if check == 1:
print('graph has a Euler cycle' )
_lowerCAmelCase : Any = dfs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
print(lowerCAmelCase__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_lowerCAmelCase : Tuple = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
_lowerCAmelCase : Tuple = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
_lowerCAmelCase : Any = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
_lowerCAmelCase : Tuple = {
1: [],
2: []
# all degree is zero
}
_lowerCAmelCase : List[Any] = 10
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
check_euler(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
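# The degree test above in compact form: a connected undirected graph has an
# Euler circuit iff no vertex has odd degree, and an Euler path iff exactly
# two do; anything else admits neither. A standalone sketch:
def euler_kind(graph):
    odd = [v for v, nbrs in graph.items() if len(nbrs) % 2 == 1]
    if len(odd) == 0:
        return "circuit"
    if len(odd) == 2:
        return "path"
    return "neither"
print(euler_kind({1: [2, 3], 2: [1, 3], 3: [1, 2]}))  # circuit
print(euler_kind({1: [2], 2: [1, 3], 3: [2]}))  # path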
| 720 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
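# The additive causal mask registered above, shown standalone: a large
# negative value strictly above the diagonal suppresses attention to future
# positions once the mask is added to the logits and softmaxed.
import torch
seq_len = 4
mask = torch.full((seq_len, seq_len), -10000.0)
mask.triu_(1)  # keep -10000.0 strictly above the diagonal, zero elsewhere
print(mask)  # row i attends only to positions <= i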
| 16 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for e in env_keys:
_lowerCAmelCase : Any = int(os.environ.get(_lowerCamelCase , -1 ) )
if val >= 0:
return val
return default
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : str = os.environ.get(_lowerCamelCase , str(_lowerCamelCase ) )
return strtobool(_lowerCamelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase="no" ):
'''simple docstring'''
_lowerCAmelCase : str = os.environ.get(_lowerCamelCase , str(_lowerCamelCase ) )
return value
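# The spellings the boolean helper above accepts come from strtobool:
# "y", "yes", "t", "true", "on", "1" map to 1 and their negations to 0.
# Note that distutils was removed in Python 3.12, so newer code needs a
# local reimplementation of this function.
from distutils.util import strtobool
print(strtobool("yes"), strtobool("off"))  # 1 0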
| 721 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
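# The dtype rule implemented above, observed directly: without 64-bit mode
# JAX downcasts to 32-bit defaults, which is why the formatter selects
# int32/float32 unless the jax_enable_x64 flag is set.
import jax.numpy as jnp
import numpy as np
x = np.arange(3, dtype=np.int64)
print(jnp.asarray(x).dtype)  # int32 unless jax_enable_x64 is on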
| 16 | 0 |
"""simple docstring"""
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __UpperCamelCase :
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=64 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=3 ,_A=4 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[str] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Optional[Any] = use_input_mask
_lowerCAmelCase : Tuple = use_token_type_ids
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : str = embedding_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : Optional[Any] = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : List[str] = num_choices
_lowerCAmelCase : Optional[Any] = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : Tuple = None
if self.use_input_mask:
_lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : int = None
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : Any = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_A ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = MegatronBertModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Any = model(_A ,attention_mask=_A ,token_type_ids=_A )
_lowerCAmelCase : Union[str, Any] = model(_A ,token_type_ids=_A )
_lowerCAmelCase : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = MegatronBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MegatronBertForCausalLM(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = MegatronBertForNextSentencePrediction(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(
_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = MegatronBertForPreTraining(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[int] = model(
_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A ,next_sentence_label=_A ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = MegatronBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(
_A ,attention_mask=_A ,token_type_ids=_A ,start_positions=_A ,end_positions=_A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : Tuple = MegatronBertForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Any = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : str = MegatronBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : str = model(_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.num_choices
_lowerCAmelCase : Dict = MegatronBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCAmelCase : List[Any] = model(
_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.prepare_config_and_inputs()
        _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
_UpperCAmelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = True
# test_resize_embeddings = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ,_A ,_A ,_A=False ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = super()._prepare_for_class(_A ,_A ,return_labels=_A )
if return_labels:
if model_class in get_values(_A ):
_lowerCAmelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=_A )
_lowerCAmelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_A )
return inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = MegatronBertModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self ,config_class=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_A )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return torch.tensor(
_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase , )
_lowerCAmelCase = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
@slow
@unittest.skip('Model is not available.' )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
_lowerCAmelCase : List[str] = os.path.join(os.environ['MYDIR'] ,_A )
_lowerCAmelCase : Dict = MegatronBertModel.from_pretrained(_A )
model.to(_A )
model.half()
_lowerCAmelCase : str = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(_A )[0]
_lowerCAmelCase : List[Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape ,_A )
_lowerCAmelCase : List[Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3 ):
for jj in range(3 ):
_lowerCAmelCase : int = output[0, ii, jj]
_lowerCAmelCase : Optional[Any] = expected[3 * ii + jj]
_lowerCAmelCase : Dict = 'ii={} jj={} a={} b={}'.format(_A ,_A ,_A ,_A )
self.assertTrue(math.isclose(_A ,_A ,rel_tol=_A ,abs_tol=_A ) ,msg=_A )
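# The integration test compares logits elementwise with math.isclose at the
# module-level tolerance; the same check in isolation (values illustrative):
import math
TOLERANCE = 1e-4
expected = [-0.6040, -0.2517, -0.1025]
observed = [-0.60401, -0.25169, -0.10252]
for a, b in zip(observed, expected):
    assert math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), (a, b)
print("all elements within tolerance")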
| 700 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_, mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
        assert isinstance(self.scheduler, DDIMScheduler)  # encoding is only defined for the DDIM scheduler
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def __lowerCamelCase ( xa, xb, alpha ):
        '''simple docstring'''
        # spherical linear interpolation between two tensors; the duplicate `_A`
        # parameters collapsed two distinct inputs, restored here as xa and xb
        theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)
| 16 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key( k ):
    '''simple docstring'''
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder' ):
        k = k.replace('.attn', '.self_attn' )
        k = k.replace('norm1', 'self_attn_layer_norm' )
        k = k.replace('norm2', 'final_layer_norm' )
    elif k.startswith('decoder' ):
        k = k.replace('norm1', 'self_attn_layer_norm' )
        k = k.replace('norm2', 'encoder_attn_layer_norm' )
        k = k.replace('norm3', 'final_layer_norm' )
    return k
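# Illustrative self-check (not from the original script; the key below is a
# representative ParlAI encoder attention weight name, chosen as an assumption):
assert (
    rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
    == "encoder.layers.0.self_attn.q_proj.weight"
)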
def rename_layernorm_keys( sd ):
    '''simple docstring'''
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm' )
        assert new_k not in sd
        sd[new_k] = v
_lowerCAmelCase = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint( checkpoint_path, pytorch_dump_folder_path, config_json_path ):
    '''simple docstring'''
    model = torch.load(checkpoint_path, map_location='cpu' )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
_lowerCAmelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
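# Example (not from the original module): on a BPE "word", i.e. a tuple of
# symbols, get_pairs yields every adjacent symbol pair that could be merged.
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}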
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 16 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits = 3 ):
    '''simple docstring'''
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be a integer.' )
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.' )
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.' )
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).' )
    qr = QuantumRegister(number_of_qubits, 'qr' )
    cr = ClassicalRegister(number_of_qubits, 'cr' )
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        # controlled-phase rotations between the remaining qubits
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    # reverse the qubit order
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator' )
    job = execute(quantum_circuit, backend, shots=10000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
    def __init__( self, d_model, d_ff, dropout_rate ):
        '''simple docstring'''
        super().__init__()
        # two parallel input projections for the gated GELU: wi_0 feeds the
        # activation branch and wi_1 the linear branch
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()
    def __lowerCamelCase ( self, hidden_states ):
        '''simple docstring'''
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
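# Quick numeric check of the FiLM transform applied above (not from the
# original module; the values are illustrative): with scale = 0.5 and
# shift = 0.25, an input of 1 maps to 1 * (1 + 0.5) + 0.25 = 1.75.
if __name__ == "__main__":
    _x = torch.ones(1, 2)
    _scale = torch.full((1, 2), 0.5)
    _shift = torch.full((1, 2), 0.25)
    print(_x * (1 + _scale) + _shift)  # tensor([[1.7500, 1.7500]])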
| 16 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCAmelCase = logging.get_logger(__name__)
class __UpperCamelCase ( __A ):
def __init__( self ,_A ,_A ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : int = feature_size
_lowerCAmelCase : List[Any] = sampling_rate
_lowerCAmelCase : Dict = padding_value
_lowerCAmelCase : List[Any] = kwargs.pop('padding_side' ,'right' )
_lowerCAmelCase : str = kwargs.pop('return_attention_mask' ,_A )
super().__init__(**_A )
def __lowerCamelCase ( self ,_A ,_A = True ,_A = None ,_A = False ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
if isinstance(_A ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
_lowerCAmelCase : int = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
_lowerCAmelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
_lowerCAmelCase : str = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_A ) == 0:
if return_attention_mask:
_lowerCAmelCase : Optional[Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_lowerCAmelCase : Optional[Any] = required_input[0]
if isinstance(_A ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_lowerCAmelCase : str = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_A ):
_lowerCAmelCase : List[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_A ):
_lowerCAmelCase : List[str] = 'tf'
elif is_torch_tensor(_A ):
_lowerCAmelCase : Tuple = 'pt'
elif isinstance(_A ,(int, float, list, tuple, np.ndarray) ):
_lowerCAmelCase : str = 'np'
else:
raise ValueError(
F"""type of {first_element} unknown: {type(_A )}. """
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
_lowerCAmelCase : Dict = to_numpy(_A )
else:
_lowerCAmelCase : str = [to_numpy(_A ) for v in value]
# Convert padding_strategy in PaddingStrategy
_lowerCAmelCase : List[Any] = self._get_padding_strategies(padding=_A ,max_length=_A )
_lowerCAmelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
_lowerCAmelCase : List[str] = len(_A )
if not all(len(_A ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
_lowerCAmelCase : List[str] = []
for i in range(_A ):
_lowerCAmelCase : Optional[int] = {k: v[i] for k, v in processed_features.items()}
# truncation
_lowerCAmelCase : Tuple = self._truncate(
_A ,max_length=_A ,pad_to_multiple_of=_A ,truncation=_A ,)
truncated_inputs.append(_A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_lowerCAmelCase : int = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_lowerCAmelCase : Optional[Any] = PaddingStrategy.MAX_LENGTH
_lowerCAmelCase : List[str] = {}
for i in range(_A ):
# padding
_lowerCAmelCase : int = self._pad(
truncated_inputs[i] ,max_length=_A ,padding_strategy=_A ,pad_to_multiple_of=_A ,return_attention_mask=_A ,)
for key, value in outputs.items():
if key not in batch_outputs:
_lowerCAmelCase : Optional[int] = []
if value.dtype is np.dtype(np.floataa ):
_lowerCAmelCase : Any = value.astype(np.floataa )
batch_outputs[key].append(_A )
return BatchFeature(_A ,tensor_type=_A )
def __lowerCamelCase ( self ,_A ,_A = None ,_A = PaddingStrategy.DO_NOT_PAD ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : str = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_lowerCAmelCase : List[Any] = len(_A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowerCAmelCase : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowerCAmelCase : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_lowerCAmelCase : List[Any] = np.ones(len(_A ) ,dtype=np.intaa )
if needs_to_be_padded:
_lowerCAmelCase : int = max_length - len(_A )
if self.padding_side == "right":
if return_attention_mask:
_lowerCAmelCase : Union[str, Any] = np.pad(
processed_features['attention_mask'] ,(0, difference) )
_lowerCAmelCase : int = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_lowerCAmelCase : str = np.pad(
_A ,_A ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_lowerCAmelCase : Any = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
_lowerCAmelCase : List[str] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_lowerCAmelCase : Union[str, Any] = np.pad(
_A ,_A ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def __lowerCamelCase ( self ,_A ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
_lowerCAmelCase : Any = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowerCAmelCase : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowerCAmelCase : Optional[int] = len(_A ) > max_length
if needs_to_be_truncated:
_lowerCAmelCase : str = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_lowerCAmelCase : Dict = processed_features['attention_mask'][:max_length]
return processed_features
def __lowerCamelCase ( self ,_A=False ,_A=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
_lowerCAmelCase : Union[str, Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_A ,_A ):
_lowerCAmelCase : Optional[int] = PaddingStrategy(_A )
elif isinstance(_A ,_A ):
_lowerCAmelCase : str = padding
else:
_lowerCAmelCase : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
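# Worked example of the right-padding path used by `_pad` above (not from the
# original module; the numbers are illustrative): a 3-step mono feature
# sequence padded out to length 5, with the matching attention mask.
if __name__ == "__main__":
    _feats = np.array([0.1, 0.2, 0.3], dtype=np.float32)
    _mask = np.ones(len(_feats), dtype=np.int32)
    _difference = 5 - len(_feats)
    print(np.pad(_feats, (0, _difference), 'constant', constant_values=0.0))  # [0.1 0.2 0.3 0.  0. ]
    print(np.pad(_mask, (0, _difference)))  # [1 1 1 0 0]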
| 703 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
| 16 | 0 |
def valid_coloring( neighbours, colored_vertices, color ):
    '''simple docstring'''
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )
def util_color( graph, max_colors, colored_vertices, index ):
    '''simple docstring'''
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph, max_colors ):
    '''simple docstring'''
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
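# Example (not from the original module): a 4-cycle, given as an adjacency
# matrix as assumed by util_color above, is 2-colorable.
if __name__ == "__main__":
    _graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(_graph, 2))  # [0, 1, 0, 1]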
| 704 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
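# Worked example (not from the original file; it assumes the Gaussian
# elimination routine above is exposed as `solve`, the name the interpolation
# code below refers to): solving x + y = 3, x - y = 1 gives
#   solve([[1, 1], [1, -1]], [[3], [1]]) -> [[2.0], [1.0]]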
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 16 | 0 |
"""simple docstring"""
def calc_profit( profit, weight, max_weight ):
    '''simple docstring'''
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.' )
    if max_weight <= 0:
        raise ValueError('max_weight must greater than zero.' )
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.' )
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.' )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
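# Deterministic check (not from the original script): the classic fractional
# knapsack instance below yields a maximum gain of 240.0.
assert abs(calc_profit([60, 100, 120], [10, 20, 30], 50) - 240.0) < 1E-9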
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
_lowerCAmelCase = [int(x) for x in input("""Input profits separated by spaces: """).split()]
_lowerCAmelCase = [int(x) for x in input("""Input weights separated by spaces: """).split()]
_lowerCAmelCase = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 705 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char( cp ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def is_chinese( word ):
    '''simple docstring'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
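# Example (not from the original script): every character must be CJK for a
# word to count as Chinese.
assert is_chinese('中文' ) == 1
assert is_chinese('abc' ) == 0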
def get_chinese_word( tokens ):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 0 |
"""simple docstring"""
def binary_exponentiation( a, n, mod ):
    '''simple docstring'''
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # integer halving keeps the exponent an int
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
_lowerCAmelCase = 7_0_1
_lowerCAmelCase = 1_0_0_0_0_0_0_0_0_0
_lowerCAmelCase = 1_0
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 706 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 16 | 0 |
"""simple docstring"""
def prefix_function( input_string ):
    '''simple docstring'''
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_str ):
    '''simple docstring'''
    return max(prefix_function(input_str))
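# Example (not from the original module): the prefix function of "aabcdaabc"
# peaks at 4, the length of its longest proper prefix that is also a suffix.
assert prefix_function('aabcdaabc' ) == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix('aabcdaabc' ) == 4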
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
"""simple docstring"""
import base64
def base85_encode( string ):
    '''simple docstring'''
    return base64.a85encode(string.encode('utf-8' ) )
def base85_decode( a85encoded ):
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode('utf-8' )
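# Round-trip check (not from the original module): Ascii85 encoding is
# lossless for UTF-8 text.
assert base85_decode(base85_encode('base 85' ) ) == 'base 85'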
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 0 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):  # called by name below; `config_path` is an assumed parameter name
    '''Load an OmegaConf config from disk, optionally pretty-printing it.'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):  # function name assumed; the original was obfuscated
    '''Instantiate a VQModel from a config and load its checkpoint onto `device`.'''
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):  # function name assumed
    '''Encode an image batch to the VQGAN latent space and decode it back.'''
    z, _, _ = model.encode(x )
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    '''Resolve a dotted path such as `pkg.module.Class` to the object it names.'''
    module, cls = string.rsplit('.' , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
    '''Build the object described by `config["target"]` with `config["params"]`.'''
    if "target" not in config:
        raise KeyError('Expected key `target` to instantiate.' )
    return get_obj_from_str(config['target'] )(**config.get('params' , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    '''Instantiate a model from a config, then optionally load weights and move it to the GPU.'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):  # function name assumed
    '''Load a Lightning checkpoint (if given) and build the model it describes.'''
    if ckpt:
        pl_sd = torch.load(ckpt , map_location='cpu' )
        global_step = pl_sd['global_step']
        print(f"""loaded model from global step {global_step}.""" )
    else:
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=gpu , eval_mode=eval_mode )['model']
    return model, global_step
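# Usage sketch (paths and the random tensor are illustrative, not from the original file):
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   vqgan = load_vqgan(device)                      # uses the default yaml/pt paths above
#   x = torch.randn(1, 3, 256, 256, device=device)  # stand-in for a preprocessed image batch
#   xrec = reconstruct_with_vqgan(x, vqgan)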
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast( PreTrainedTokenizerFast ):  # class name assumed; the base class is the fast tokenizer imported above
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        '''Instantiate the fast BERT tokenizer and keep its normalizer in sync with the passed options.'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b=None ):
        '''Wrap the ids as [CLS] A [SEP] (B [SEP]) for single sequences or pairs.'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b = None ):
        '''Build the 0/1 segment-id mask for a single sequence or a sequence pair.'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        '''Serialize the vocabulary files into `save_directory` and return their paths.'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
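# Usage sketch (model id is illustrative):
#   tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
#   enc = tokenizer('Hello world', 'second segment')
#   # enc['input_ids'] starts with [CLS] and separates the pair with [SEP];
#   # enc['token_type_ids'] marks the second segment with 1s.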
| 16 | 0 |
"""simple docstring"""
import argparse
import datetime
def zeller( date_input ):  # function and variable names restored from their uses later in the file
    '''Zeller's congruence: return which day of the week a mm-dd-yyyy (or mm/dd/yyyy) date falls on.'''
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12' )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_b = date_input[5]
    # Validate
    if sep_b not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math: January and February count as months 13 and 14 of the previous year
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths vars: century part and two-digit year part
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math against the standard library's weekday
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response = f"""Your date {date_input}, is a {days[str(f )]}!"""
    return response
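# Worked check (illustrative): zeller('01-31-2010') reports a Sunday, which
# matches datetime.date(2010, 1, 31).weekday() == 6.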
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
    args = parser.parse_args()
zeller(args.date_input) | 709 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester( unittest.TestCase ):  # class and test names assumed; the originals were obfuscated
    def setUp( self ):
        '''Locate the accelerate test scripts that will be launched with torchrun.'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
    @require_multi_gpu
    def test_multi_gpu( self ):
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
    @require_multi_gpu
    def test_multi_gpu_ops( self ):
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(F"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
    @require_multi_gpu
    def test_pad_across_processes( self ):
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
    @require_multi_gpu
    def test_distributed_data_loop( self ):
        print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensorb = accelerator.pad_across_processes(tensor, pad_first=True)  # renamed from a second shadowing `tensora`
    if tensorb.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensorb.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensorb[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensorb[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
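    # Behavior illustrated (2 processes): if rank 0 holds a (2, 10) tensor and rank 1
    # holds (3, 10), pad_across_processes right-pads both to (3, 10) with zeros;
    # pad_first=True left-pads instead, which is what the `index` checks above verify.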
| 16 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):  # name restored from the call below
    '''Extract selected warnings from one artifact (a zip file, or a directory when run from GitHub Actions).
    `from_gh` and `logger` are module-level names defined elsewhere in this file.'''
    selected_warnings = set()
    buffer = []
    def parse_line( fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode('UTF-8' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(' ' ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '\n'.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
    return selected_warnings
def extract_warnings( artifact_dir , targets ):  # name restored from the call below
    '''Extract selected warnings from all artifact files found under `artifact_dir`.'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith('.zip' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ):  # name restored from the `type=list_str` use below
        '''Split a comma-separated CLI value into a list.'''
        return values.split(',' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
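# Invocation sketch (script name, run id and token are placeholders):
#   python extract_warnings.py --workflow_run_id 123456789 \
#       --output_dir ./artifacts --token <GITHUB_TOKEN>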
| 710 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations( sequence ):  # name restored from the calls at module scope
    '''Print every permutation of `sequence` using backtracking.'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence , current_sequence , index , index_used ,):
    '''Recursively extend `current_sequence`; print it once every element has been used.'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
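# For the four-element sequence above this prints all 4! = 24 orderings, starting
# with [3, 1, 2, 4] (elements taken in index order) and ending with [4, 2, 1, 3].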
| 16 | 0 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = F'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = F'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = F'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = F'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = F'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = F'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = F'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = F'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = F'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = F'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = F'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = F'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = """mid_block.attentions.0."""
sd_mid_atn_prefix = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = F'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = F'''middle_block.{2*j}.'''
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict( unet_state_dict ):  # name restored from the call in the __main__ block
    '''Rename HF Diffusers UNet keys to the original stable-diffusion layout.'''
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = F'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = F'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = F'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = F'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = F'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = F'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = F'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = F'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = F'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = F'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd( w ):  # name restored from the call below
    '''SD-format attention weights carry two trailing singleton dimensions.'''
    return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict( vae_state_dict ):  # name restored from the call in the __main__ block
    '''Rename HF Diffusers VAE keys to the original stable-diffusion layout.'''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"""q""": 0, """k""": 1, """v""": 2}
def convert_text_enc_state_dict_v20( text_enc_dict ):  # `v20` restored from the digit-mangled name used in the __main__ block
    '''Convert an HF CLIP text encoder state dict to the OpenCLIP (SD 2.x) layout,
    fusing the separate q/k/v projections into single in_proj tensors.'''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight' )
            or k.endswith('.self_attn.k_proj.weight' )
            or k.endswith('.self_attn.v_proj.weight' )
        ):
            k_pre = k[: -len('.q_proj.weight' )]
            k_code = k[-len('q_proj.weight' )]  # single character: 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias' )
            or k.endswith('.self_attn.k_proj.bias' )
            or k.endswith('.self_attn.v_proj.bias' )
        ):
            k_pre = k[: -len('.q_proj.bias' )]
            k_code = k[-len('q_proj.bias' )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors )
    return new_state_dict
def convert_text_enc_state_dict( text_enc_dict ):  # name restored from the call in the __main__ block
    '''SD 1.x text encoders need no renaming; return the dict unchanged.'''
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
_lowerCAmelCase = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
_lowerCAmelCase = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
_lowerCAmelCase = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
    # Load models from safetensors if they exist, otherwise from the pytorch bins
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="""cpu""")
    else:
        unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
        unet_state_dict = torch.load(unet_path, map_location="""cpu""")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="""cpu""")
    else:
        vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
        vae_state_dict = torch.load(vae_path, map_location="""cpu""")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="""cpu""")
    else:
        text_enc_path = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
        text_enc_dict = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify a v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"""state_dict""": state_dict}
        torch.save(state_dict, args.checkpoint_path)
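    # Invocation sketch (script name and paths are placeholders):
    #   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model \
    #       --checkpoint_path ./model.safetensors --half --use_safetensors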
| 711 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):  # class name restored from the return value of `get_logger` below
    '''Adapter that only emits log records on the main process unless told otherwise.'''
    @staticmethod
    def _should_log( main_process_only ):
        '''Check if the record should be emitted on the current process.'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self ,level ,msg ,*args ,**kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' ,True )  # defaults restored from accelerate's implementation
        in_order = kwargs.pop('in_order' ,False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg, kwargs = self.process(msg ,kwargs )
                self.logger.log(level ,msg ,*args ,**kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg ,kwargs )
                        self.logger.log(level ,msg ,*args ,**kwargs )
                    state.wait_for_everyone()
def get_logger( name , log_level = None ):  # name restored from accelerate's public API
    '''Return a `MultiProcessAdapter`-wrapped logger, honoring ACCELERATE_LOG_LEVEL.'''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
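# Usage sketch: emit once from the main process, or from every rank in order.
#   logger = get_logger(__name__)
#   logger.info('printed on the main process only')
#   logger.info('printed on every process, in rank order', main_process_only=False, in_order=True)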
| 16 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester( unittest.TestCase ):  # class name restored from the `setUp` below
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,num_frames=10 ,image_size=18 ,min_resolution=30 ,max_resolution=400 ,do_resize=True ,size=None ,do_normalize=True ,image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ,crop_size=None ,):
        '''Hold the size/normalization knobs the image-processor tests below read back.'''
        size = size if size is not None else {"""shortest_edge""": 18}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
        '''Return the kwargs used to build the image processor under test.'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):  # class name assumed; the mixin is imported above
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''Build the tester that holds all size/normalization knobs.'''
        self.image_processor_tester = VivitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):  # test names assumed; they follow the standard transformers suite
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'image_mean' ) )
        self.assertTrue(hasattr(image_processing ,'image_std' ) )
        self.assertTrue(hasattr(image_processing ,'do_normalize' ) )
        self.assertTrue(hasattr(image_processing ,'do_resize' ) )
        self.assertTrue(hasattr(image_processing ,'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing ,'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'shortest_edge': 18} )
        self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
        self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
    def test_call_pil( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester ,equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video ,list )
            self.assertIsInstance(video[0] ,Image.Image )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,(
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
        # Test batched
        encoded_videos = image_processing(video_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
    def test_call_numpy( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for video in video_inputs:
            self.assertIsInstance(video ,list )
            self.assertIsInstance(video[0] ,np.ndarray )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,(
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
        # Test batched
        encoded_videos = image_processing(video_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
    def test_call_pytorch( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video ,list )
            self.assertIsInstance(video[0] ,torch.Tensor )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,(
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
        # Test batched
        encoded_videos = image_processing(video_inputs ,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_videos.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,)
| 712 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):  # class names restored from the transformers DPR module
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:  # mixin name restored from the transformers DPR module
    def __call__( self ,questions ,titles = None ,texts = None ,padding = False ,truncation = False ,max_length = None ,return_tensors = None ,return_attention_mask = None ,**kwargs ,):
        '''Encode a question against one or more (title, text) passages as [CLS] question [SEP] title [SEP] text.'''
        if titles is None and texts is None:
            return super().__call__(
                questions ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions ,text_pair ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
        titles = titles if not isinstance(titles ,str ) else [titles]
        texts = texts if not isinstance(texts ,str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions ,str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                F"""There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.""" )
        encoded_question_and_titles = super().__call__(questions ,titles ,padding=False ,truncation=False )['input_ids']
        encoded_texts = super().__call__(texts ,add_special_tokens=False ,padding=False ,truncation=False )['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles ,encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs ,padding=padding ,max_length=max_length ,return_tensors=return_tensors )
    def decode_best_spans( self ,reader_input ,reader_output ,num_spans = 16 ,max_answer_length = 64 ,num_spans_per_passage = 4 ,):  # method name restored from the docstring above
        '''Get span predictions for the extractive Q&A model, ordered by descending relevance.'''
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_docs = len(relevance_logits )
        sorted_docs = sorted(range(n_docs ) ,reverse=True ,key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id ,2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=max_answer_length ,top_spans=num_spans_per_passage ,)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=doc_id ,start_index=start_index ,end_index=end_index ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self ,start_logits ,end_logits ,max_answer_length ,top_spans ,):  # method name restored from the call above
        '''Find the best answer spans for one passage, keeping only non-overlapping candidates.'''
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores ,key=lambda x : x[1] ,reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 16 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCAmelCase = False
class VersatileDiffusionMegaPipelineFastTests( unittest.TestCase ):  # name assumed; no fast tests are defined here
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests( unittest.TestCase ):  # class and test names assumed
    def tearDown( self ):
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained( self ):
        '''Saving and reloading the pipeline must not change its outputs.'''
        # torch.float16 restored from the digit-mangled `torch.floataa`
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt='first prompt' ,image=init_image ,text_to_image_strength=0.75 ,generator=generator ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname ,torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe.dual_guided(
            prompt='first prompt' ,image=init_image ,text_to_image_strength=0.75 ,generator=generator ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
_lowerCAmelCase : str = 'cyberpunk 2077'
_lowerCAmelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = pipe.dual_guided(
prompt=UpperCamelCase_ ,image=UpperCamelCase_ ,text_to_image_strength=0.7_5 ,generator=UpperCamelCase_ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
_lowerCAmelCase : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : str = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_lowerCAmelCase : Optional[Any] = 'A painting of a squirrel eating a burger '
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe.text_to_image(
prompt=UpperCamelCase_ ,generator=UpperCamelCase_ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images
_lowerCAmelCase : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
_lowerCAmelCase : List[Any] = pipe.image_variation(UpperCamelCase_ ,generator=UpperCamelCase_ ,output_type='numpy' ).images
_lowerCAmelCase : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 713 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 16 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class __UpperCamelCase ( _UpperCamelCase ):
_UpperCAmelCase = "codegen"
_UpperCAmelCase = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self ,_A=5_0400 ,_A=2048 ,_A=2048 ,_A=4096 ,_A=28 ,_A=16 ,_A=64 ,_A=None ,_A="gelu_new" ,_A=0.0 ,_A=0.0 ,_A=0.0 ,_A=1E-5 ,_A=0.0_2 ,_A=True ,_A=5_0256 ,_A=5_0256 ,_A=False ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : int = n_ctx
_lowerCAmelCase : Tuple = n_positions
_lowerCAmelCase : List[Any] = n_embd
_lowerCAmelCase : List[str] = n_layer
_lowerCAmelCase : Any = n_head
_lowerCAmelCase : Any = n_inner
_lowerCAmelCase : Union[str, Any] = rotary_dim
_lowerCAmelCase : List[str] = activation_function
_lowerCAmelCase : Optional[int] = resid_pdrop
_lowerCAmelCase : List[str] = embd_pdrop
_lowerCAmelCase : Optional[Any] = attn_pdrop
_lowerCAmelCase : List[str] = layer_norm_epsilon
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : int = use_cache
_lowerCAmelCase : Any = bos_token_id
_lowerCAmelCase : Dict = eos_token_id
super().__init__(
bos_token_id=_A ,eos_token_id=_A ,tie_word_embeddings=_A ,**_A )
class __UpperCamelCase ( _UpperCamelCase ):
def __init__( self ,_A ,_A = "default" ,_A = None ,_A = False ,):
'''simple docstring'''
super().__init__(_A ,task=_A ,patching_specs=_A ,use_past=_A )
if not getattr(self._config ,'pad_token_id' ,_A ):
# TODO: how to do that better?
_lowerCAmelCase : Union[str, Any] = 0
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
self.fill_with_past_key_values_(_A ,direction='inputs' )
_lowerCAmelCase : Optional[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
_lowerCAmelCase : Optional[Any] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._config.n_layer
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self._config.n_head
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = super(_A ,self ).generate_dummy_inputs(
_A ,batch_size=_A ,seq_length=_A ,is_pair=_A ,framework=_A )
# We need to order the input in the way they appears in the forward()
_lowerCAmelCase : Optional[Any] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase, _lowerCAmelCase : str = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCAmelCase : Union[str, Any] = seqlen + 2
_lowerCAmelCase : Union[str, Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCAmelCase : Optional[Any] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(self.num_layers )
]
_lowerCAmelCase : Dict = common_inputs["attention_mask"]
if self.use_past:
_lowerCAmelCase : str = ordered_inputs["attention_mask"].dtype
_lowerCAmelCase : Any = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A ,_A ,dtype=_A )] ,dim=1 )
return ordered_inputs
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 13
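# A quick standalone check of the dummy ``past_key_values`` layout built above:
# one (key, value) pair of zeros per layer, each with shape
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
# The sizes below are made up for illustration, not real CodeGen dimensions.
def _demo_past_shape(batch=2, seqlen=5, num_heads=4, hidden_size=32, num_layers=3):
    import torch
    past_len = seqlen + 2  # mirrors the ``seqlen + 2`` used in the method above
    shape = (batch, num_heads, past_len, hidden_size // num_heads)
    past = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
    return past[0][0].shape  # torch.Size([2, 4, 7, 8])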
| 714 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
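# Outside the test harness, the scheduler under test follows the usual
# diffusers pattern: set timesteps once, then feed each model output to
# ``step``. A minimal sketch with a random stand-in for the model; the helper
# name and tensor sizes are illustrative only.
def _demo_unipc_loop(num_inference_steps=10):
    import torch
    from diffusers import UniPCMultistepScheduler
    scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
    sample = torch.randn(1, 3, 8, 8)
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a real UNet
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample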
| 16 | 0 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return getitem, k
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return setitem, k, v
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return delitem, k
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , *_lowerCamelCase ):
'''simple docstring'''
try:
return fun(a__ , *a__ ), None
except Exception as e:
return None, e
_lowerCAmelCase = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_lowerCAmelCase = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_lowerCAmelCase = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_lowerCAmelCase = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = HashMap(initial_block_size=4 )
_lowerCAmelCase : List[Any] = {}
for _, (fun, *args) in enumerate(a__ ):
_lowerCAmelCase, _lowerCAmelCase : Any = _run_operation(a__ , a__ , *a__ )
_lowerCAmelCase, _lowerCAmelCase : Any = _run_operation(a__ , a__ , *a__ )
assert my_res == py_res
assert str(a__ ) == str(a__ )
assert set(a__ ) == set(a__ )
assert len(a__ ) == len(a__ )
assert set(my.items() ) == set(py.items() )
def lowerCamelCase__ ( ):
'''simple docstring'''
def is_public(_lowerCamelCase ) -> bool:
return not name.startswith('_' )
_lowerCAmelCase : Optional[int] = {name for name in dir({} ) if is_public(a__ )}
_lowerCAmelCase : List[Any] = {name for name in dir(HashMap() ) if is_public(a__ )}
assert dict_public_names > hash_public_names
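# How the comparison works, in isolation: ``_run_operation`` applies one
# (fun, *args) tuple and returns (result, error), so the HashMap and a plain
# dict can be compared operation by operation. Assumes the helpers keep their
# original names (``_run_operation`` plus ``setitem``/``getitem`` from operator).
def _demo_run_operation():
    d = {}
    assert _run_operation(d, setitem, "key", "val") == (None, None)
    assert _run_operation(d, getitem, "key") == ("val", None)
    _, err = _run_operation(d, getitem, "missing")
    return isinstance(err, KeyError)  # True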
| 715 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
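# A few renames ``replace_key`` performs, for orientation (derived from the
# branches above, not an exhaustive specification):
#   "encoders.0.level_blocks.0.k"  -> trailing ".k" becomes ".codebook"
#   "prior.x_out.weight"           -> "prior.fc_proj_out.weight"
#   "foo.prime_state_ln.bias"      -> "foo.encoder.final_layer_norm.bias"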
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 0 |
"""simple docstring"""
_lowerCAmelCase = 0 # The first color of the flag.
_lowerCAmelCase = 1 # The second color of the flag.
_lowerCAmelCase = 2 # The third color of the flag.
_lowerCAmelCase = (red, white, blue)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not sequence:
return []
if len(snake_case__ ) == 1:
return list(snake_case__ )
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : Tuple = len(snake_case__ ) - 1
_lowerCAmelCase : Any = 0
while mid <= high:
if sequence[mid] == colors[0]:
_lowerCAmelCase, _lowerCAmelCase : List[Any] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = sequence[high], sequence[mid]
high -= 1
else:
_lowerCAmelCase : List[str] = f"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(snake_case__ )
return sequence
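# Quick sanity check of the three-way partition above (colors are 0, 1, 2);
# the call below is illustrative and uses the function's original name:
assert dutch_national_flag_sort([2, 1, 0, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]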
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = input("""Enter numbers separated by commas:\n""").strip()
_lowerCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
print(F'''{dutch_national_flag_sort(unsorted)}''')
| 716 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 'data2vec-vision'
def __init__( self ,_A=768 ,_A=12 ,_A=12 ,_A=3072 ,_A="gelu" ,_A=0.0 ,_A=0.0 ,_A=0.0_2 ,_A=1E-12 ,_A=224 ,_A=16 ,_A=3 ,_A=False ,_A=False ,_A=False ,_A=False ,_A=0.1 ,_A=0.1 ,_A=True ,_A=[3, 5, 7, 11] ,_A=[1, 2, 3, 6] ,_A=True ,_A=0.4 ,_A=256 ,_A=1 ,_A=False ,_A=255 ,**_A ,):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : List[Any] = image_size
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Any = use_mask_token
_lowerCAmelCase : Any = use_absolute_position_embeddings
_lowerCAmelCase : List[Any] = use_relative_position_bias
_lowerCAmelCase : Any = use_shared_relative_position_bias
_lowerCAmelCase : Any = layer_scale_init_value
_lowerCAmelCase : Optional[int] = drop_path_rate
_lowerCAmelCase : str = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Any = out_indices
_lowerCAmelCase : List[str] = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Union[str, Any] = use_auxiliary_head
_lowerCAmelCase : Optional[int] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : int = auxiliary_num_convs
_lowerCAmelCase : List[str] = auxiliary_concat_input
_lowerCAmelCase : int = semantic_loss_ignore_index
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = version.parse("1.11" )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return 1E-4
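# A minimal sketch of how the ONNX config above is consumed, assuming the
# classes keep their original names (Data2VecVisionConfig /
# Data2VecVisionOnnxConfig); it exposes a single ``pixel_values`` input with
# dynamic batch/channel/height/width axes.
def _demo_onnx_inputs():
    config = Data2VecVisionConfig()
    onnx_config = Data2VecVisionOnnxConfig(config)
    return dict(onnx_config.inputs)
    # -> {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}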
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
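# XLNet places special tokens at the END of the sequence (A <sep> B <sep> <cls>)
# and pads on the LEFT, which is what ``build_inputs_with_special_tokens`` and
# the ``"left"`` padding side above encode. A toy illustration with made-up ids:
def _demo_special_token_layout(sep_id=4, cls_id=3):
    ids_a, ids_b = [10, 11], [20]
    single = ids_a + [sep_id] + [cls_id]                   # [10, 11, 4, 3]
    pair = ids_a + [sep_id] + ids_b + [sep_id] + [cls_id]  # [10, 11, 4, 20, 4, 3]
    return single, pair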
| 16 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class __UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase = "deta"
_UpperCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self ,_A=None ,_A=900 ,_A=2048 ,_A=6 ,_A=2048 ,_A=8 ,_A=6 ,_A=1024 ,_A=8 ,_A=0.0 ,_A=True ,_A="relu" ,_A=256 ,_A=0.1 ,_A=0.0 ,_A=0.0 ,_A=0.0_2 ,_A=1.0 ,_A=True ,_A=False ,_A="sine" ,_A=5 ,_A=4 ,_A=4 ,_A=True ,_A=300 ,_A=True ,_A=True ,_A=1 ,_A=5 ,_A=2 ,_A=1 ,_A=1 ,_A=5 ,_A=2 ,_A=0.1 ,_A=0.2_5 ,**_A ,):
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowerCAmelCase : Optional[int] = CONFIG_MAPPING["resnet"](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(snake_case__ ,snake_case__ ):
_lowerCAmelCase : Union[str, Any] = backbone_config.pop('model_type' )
_lowerCAmelCase : Optional[int] = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Tuple = config_class.from_dict(snake_case__ )
_lowerCAmelCase : List[str] = backbone_config
_lowerCAmelCase : List[str] = num_queries
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = d_model
_lowerCAmelCase : Tuple = encoder_ffn_dim
_lowerCAmelCase : List[Any] = encoder_layers
_lowerCAmelCase : Dict = encoder_attention_heads
_lowerCAmelCase : List[Any] = decoder_ffn_dim
_lowerCAmelCase : Any = decoder_layers
_lowerCAmelCase : Dict = decoder_attention_heads
_lowerCAmelCase : Optional[int] = dropout
_lowerCAmelCase : List[Any] = attention_dropout
_lowerCAmelCase : List[Any] = activation_dropout
_lowerCAmelCase : int = activation_function
_lowerCAmelCase : Optional[Any] = init_std
_lowerCAmelCase : str = init_xavier_std
_lowerCAmelCase : Tuple = encoder_layerdrop
_lowerCAmelCase : Union[str, Any] = auxiliary_loss
_lowerCAmelCase : Any = position_embedding_type
# deformable attributes
_lowerCAmelCase : int = num_feature_levels
_lowerCAmelCase : Union[str, Any] = encoder_n_points
_lowerCAmelCase : str = decoder_n_points
_lowerCAmelCase : str = two_stage
_lowerCAmelCase : Optional[int] = two_stage_num_proposals
_lowerCAmelCase : Any = with_box_refine
_lowerCAmelCase : int = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
_lowerCAmelCase : int = class_cost
_lowerCAmelCase : Any = bbox_cost
_lowerCAmelCase : int = giou_cost
# Loss coefficients
_lowerCAmelCase : List[Any] = mask_loss_coefficient
_lowerCAmelCase : List[str] = dice_loss_coefficient
_lowerCAmelCase : Optional[int] = bbox_loss_coefficient
_lowerCAmelCase : Any = giou_loss_coefficient
_lowerCAmelCase : Any = eos_coefficient
_lowerCAmelCase : List[str] = focal_alpha
super().__init__(is_encoder_decoder=snake_case__ ,**snake_case__ )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.d_model
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : int = self.backbone_config.to_dict()
_lowerCAmelCase : List[str] = self.__class__.model_type
return output
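# Minimal round-trip with the config above, assuming the class keeps its
# original name (DetaConfig). The default backbone is a ResNet config, and
# ``to_dict`` serializes it back to a plain nested dict:
def _demo_deta_config():
    config = DetaConfig(two_stage=False)
    d = config.to_dict()
    return d["model_type"], d["backbone_config"]["model_type"]  # ('deta', 'resnet')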
| 718 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
# add 48 0-ed integers
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
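# Rotate the working registers for the next round:
# h<-g, g<-f, f<-e, e<-(d+temp1), d<-c, c<-b, b<-a, a<-(temp1+temp2)
# (both temp1 and temp2 appear as `tempa` in the obfuscated listing below)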
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.sha256(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
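# Hedged usage sketch: `SHA256` below stands in for the obfuscated class
# `__UpperCamelCase` above; only the hashlib reference value is authoritative.
import hashlib

message = b"abc"
expected = hashlib.sha256(message).hexdigest()
print(expected)  # ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad
# With the class above exposed as SHA256: assert SHA256(message).hash == expected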
| 16 | 0 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
_lowerCAmelCase = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
_lowerCAmelCase = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_lowerCAmelCase : Optional[int] = numpy_to_pil(images )
return images
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if images.ndim == 3:
_lowerCAmelCase : List[str] = images[None, ...]
_lowerCAmelCase : List[Any] = (images * 255).round().astype('uint8' )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
_lowerCAmelCase : Any = [Image.fromarray(image.squeeze() , mode='L' ) for image in images]
else:
_lowerCAmelCase : Optional[int] = [Image.fromarray(image ) for image in images]
return pil_images
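# Minimal, self-contained sketch of the numpy -> PIL path above (the second
# helper, obfuscated to `lowerCamelCase__`, plays the role of numpy_to_pil);
# requires numpy and Pillow.
import numpy as np
from PIL import Image

batch = np.random.rand(2, 64, 64, 3)                    # float images in [0, 1]
as_uint8 = (batch * 255).round().astype("uint8")
pil_images = [Image.fromarray(img) for img in as_uint8]
assert all(im.size == (64, 64) for im in pil_images)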
| 719 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_lowerCAmelCase : Union[str, Any] = key or (lambda x : x)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure the heap property holds in both the up and down directions.
# Ideally only one of the two calls will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure the heap property holds in both the up and down directions. Ideally only
# one of the two calls will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
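# Hypothetical usage sketch. The class above is obfuscated to `__UpperCamelCase`
# and its methods to `__lowerCamelCase`; the canonical names assumed below are
# Heap, insert_item(item, weight), update_item, and extract_top.
priority_queue = Heap(key=lambda weight: -weight)   # max-heap on weight
priority_queue.insert_item("task-a", 4)
priority_queue.insert_item("task-b", 9)
priority_queue.update_item("task-a", 10)            # reprioritize in place
assert priority_queue.extract_top()[0] == "task-a"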
| 16 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase ( _snake_case , unittest.TestCase ):
_UpperCAmelCase = TextToVideoSDPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_UpperCAmelCase = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') ,up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') ,cross_attention_dim=32 ,attention_head_dim=4 ,)
_lowerCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=snake_case_ ,set_alpha_to_one=snake_case_ ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(snake_case_ )
_lowerCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(snake_case_ ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(snake_case_ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_lowerCAmelCase : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Optional[Any] = self.get_dummy_components()
_lowerCAmelCase : Tuple = TextToVideoSDPipeline(**snake_case_ )
_lowerCAmelCase : List[str] = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_lowerCAmelCase : int = self.get_dummy_inputs(snake_case_ )
_lowerCAmelCase : Union[str, Any] = "np"
_lowerCAmelCase : Dict = sd_pipe(**snake_case_ ).frames
_lowerCAmelCase : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_lowerCAmelCase : Dict = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ ,expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ ,expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
_lowerCAmelCase : int = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
_lowerCAmelCase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_lowerCAmelCase : Tuple = pipe.to('cuda' )
_lowerCAmelCase : List[Any] = "Spiderman is surfing"
_lowerCAmelCase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase : int = pipe(snake_case_ ,generator=snake_case_ ,num_inference_steps=25 ,output_type='pt' ).frames
_lowerCAmelCase : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
_lowerCAmelCase : str = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
_lowerCAmelCase : int = pipe.to('cuda' )
_lowerCAmelCase : Any = "Spiderman is surfing"
_lowerCAmelCase : str = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(snake_case_ ,generator=snake_case_ ,num_inference_steps=2 ,output_type='pt' ).frames
_lowerCAmelCase : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 720 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
# Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
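# Standalone sketch of the causal attention bias built in the constructor above:
# fill a square with -10000, then keep only the strict upper triangle.
import torch

seq_len = 4
bias = torch.full((seq_len, seq_len), -10000.0)
bias.triu_(1)   # -10000 strictly above the diagonal, 0 on and below it
print(bias[0])  # tensor([     0., -10000., -10000., -10000.])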
| 16 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
_UpperCAmelCase = DDIMPipeline
_UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"latents",
"callback",
"callback_steps",
}
_UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
_lowerCAmelCase : Dict = DDIMScheduler()
_lowerCAmelCase : Tuple = {'''unet''': unet, '''scheduler''': scheduler}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(UpperCAmelCase__ ).startswith('mps' ):
_lowerCAmelCase : Tuple = torch.manual_seed(UpperCAmelCase__ )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_lowerCAmelCase : Tuple = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = '''cpu'''
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : Any = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_lowerCAmelCase : int = self.get_dummy_inputs(UpperCAmelCase__ )
_lowerCAmelCase : int = pipe(**UpperCAmelCase__ ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 32, 32, 3) )
_lowerCAmelCase : str = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
_lowerCAmelCase : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ ,1E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = '''google/ddpm-cifar10-32'''
_lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained(UpperCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : List[str] = DDIMPipeline(unet=UpperCAmelCase__ ,scheduler=UpperCAmelCase__ )
ddim.to(UpperCAmelCase__ )
ddim.set_progress_bar_config(disable=UpperCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Tuple = ddim(generator=UpperCAmelCase__ ,eta=0.0 ,output_type='numpy' ).images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase : Optional[Any] = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = '''google/ddpm-ema-bedroom-256'''
_lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained(UpperCAmelCase__ )
_lowerCAmelCase : Optional[int] = DDIMScheduler.from_pretrained(UpperCAmelCase__ )
_lowerCAmelCase : Optional[int] = DDIMPipeline(unet=UpperCAmelCase__ ,scheduler=UpperCAmelCase__ )
ddpm.to(UpperCAmelCase__ )
ddpm.set_progress_bar_config(disable=UpperCAmelCase__ )
_lowerCAmelCase : Dict = torch.manual_seed(0 )
_lowerCAmelCase : int = ddpm(generator=UpperCAmelCase__ ,output_type='numpy' ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : int = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 721 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable with either `pickle` or `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
# use a module-level global, since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# use a module-level global, since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
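# Standalone sketch of the integer-dtype selection above (requires jax):
# numpy integer data becomes int64 only when x64 mode is enabled.
import jax
import jax.numpy as jnp
import numpy as np

value = np.arange(4)
int_dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
print(jnp.array(value, dtype=int_dtype).dtype)   # int32 unless jax_enable_x64 is set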
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = value
_lowerCAmelCase : Any = random()
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : List[Any] = None
def __repr__( self ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} ,indent=1 )
def __str__( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = str(self.value ) + ' '
_lowerCAmelCase : str = str(self.left or '' )
_lowerCAmelCase : List[str] = str(self.right or '' )
return value + left + right
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCAmelCase, _lowerCAmelCase : List[str] = split(root.left , _lowerCAmelCase )
return left, root
else:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = split(root.right , _lowerCAmelCase )
return root, right
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCAmelCase : List[Any] = merge(left.right , _lowerCAmelCase )
return left
else:
_lowerCAmelCase : Any = merge(_lowerCAmelCase , right.left )
return right
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = Node(_lowerCAmelCase )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = split(_lowerCAmelCase , _lowerCAmelCase )
return merge(merge(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Dict = split(_lowerCAmelCase , value - 1 )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = split(_lowerCAmelCase , _lowerCAmelCase )
return merge(_lowerCAmelCase , _lowerCAmelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCAmelCase : Optional[Any] = insert(_lowerCAmelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCAmelCase : Dict = erase(_lowerCAmelCase , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
_lowerCAmelCase : List[str] = input()
while args != "q":
_lowerCAmelCase : Optional[int] = interact_treap(_lowerCAmelCase , _lowerCAmelCase )
print(_lowerCAmelCase )
_lowerCAmelCase : List[Any] = input()
print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
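# Hypothetical round-trip check. The treap helpers above are all obfuscated to
# `lowerCamelCase__`; the names assumed below are insert and inorder.
root = None
for value in (5, 3, 8, 1):
    root = insert(root, value)
inorder(root)   # prints 1,3,5,8, -- BST order holds regardless of random priorities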
| 700 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
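# Minimal standalone sketch mirroring the slerp static method above:
# spherical interpolation between two flattened noise tensors (requires torch).
from math import acos, sin

import torch

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(x0.flatten(), x1.flatten()) / (x0.norm() * x1.norm()))
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)

a, b = torch.randn(8), torch.randn(8)
midpoint = slerp(a, b, 0.5)   # alpha=0 returns a, alpha=1 returns b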
| 16 | 0 |
"""simple docstring"""
_lowerCAmelCase = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
1_0: 'a',
1_1: 'b',
1_2: 'c',
1_3: 'd',
1_4: 'e',
1_5: 'f',
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
assert type(lowerCamelCase_ ) in (int, float) and decimal == int(lowerCamelCase_ )
_lowerCAmelCase : Tuple = int(lowerCamelCase_ )
_lowerCAmelCase : Optional[int] = ''''''
_lowerCAmelCase : List[Any] = False
if decimal < 0:
_lowerCAmelCase : str = True
decimal *= -1
while decimal > 0:
_lowerCAmelCase, _lowerCAmelCase : Any = divmod(decimal , 16 )  # unpacks as: decimal, remainder = divmod(decimal, 16)
_lowerCAmelCase : Any = values[remainder] + hexadecimal
_lowerCAmelCase : Optional[int] = '''0x''' + hexadecimal
if negative:
_lowerCAmelCase : Dict = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
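# Hypothetical usage, assuming the converter above is exposed as
# decimal_to_hexadecimal (it is obfuscated to `lowerCamelCase__`).
assert decimal_to_hexadecimal(255) == hex(255) == "0xff"
assert decimal_to_hexadecimal(-42) == "-0x2a"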
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
' Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
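# Toy, self-contained reproduction of the greedy BPE merge loop used by the
# `bpe` method above: repeatedly merge the lowest-ranked adjacent pair.
def toy_get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

ranks = {("l", "o"): 0, ("lo", "w"): 1}    # lower rank merges first
word = ("l", "o", "w")
while True:
    candidates = [p for p in toy_get_pairs(word) if p in ranks]
    if not candidates:
        break
    first, second = min(candidates, key=ranks.get)
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
            merged.append(first + second)
            i += 2
        else:
            merged.append(word[i])
            i += 1
    word = tuple(merged)
print(word)   # ('low',)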
| 16 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = mock.Mock()
_lowerCAmelCase : Optional[int] = 500
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = HTTPError
_lowerCAmelCase : List[Any] = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=_lowercase ) as mock_head:
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = mock.Mock()
_lowerCAmelCase : Any = 500
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Any = HTTPError
_lowerCAmelCase : Tuple = {}
# Download this model to make sure it's in the cache.
_lowerCAmelCase : Any = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=_lowercase ) as mock_head:
_lowerCAmelCase : Tuple = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __lowerCamelCase ( self ):
'''simple docstring'''
try:
_lowerCAmelCase : Dict = tempfile.mktemp()
with open(_lowercase ,'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,_lowercase )
_lowerCAmelCase : Union[str, Any] = AlbertTokenizer.from_pretrained(_lowercase )
finally:
os.remove(_lowercase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,_lowercase )
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
_UpperCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __lowerCamelCase ( cls ):
'''simple docstring'''
_lowerCAmelCase : Any = TOKEN
HfFolder.save_token(_lowercase )
@classmethod
def __lowerCamelCase ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Dict = os.path.join(_lowercase ,'vocab.txt' )
with open(_lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase : Any = BertTokenizer(_lowercase )
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token )
_lowerCAmelCase : List[str] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowercase ,repo_id='test-tokenizer' ,push_to_hub=_lowercase ,use_auth_token=self._token )
_lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
def __lowerCamelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Optional[Any] = os.path.join(_lowercase ,'vocab.txt' )
with open(_lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase : List[Any] = BertTokenizer(_lowercase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token )
_lowerCAmelCase : Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowercase ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=_lowercase ,use_auth_token=self._token )
_lowerCAmelCase : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab )
@require_tokenizers
def __lowerCamelCase ( self ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Any = os.path.join(_lowercase ,'vocab.txt' )
with open(_lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase : Tuple = CustomTokenizer(_lowercase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" ,trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Dict = os.path.join(_lowercase ,'vocab.txt' )
with open(_lowercase ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_lowerCAmelCase : Any = BertTokenizerFast.from_pretrained(_lowercase )
bert_tokenizer.save_pretrained(_lowercase )
_lowerCAmelCase : Any = CustomTokenizerFast.from_pretrained(_lowercase )
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" ,trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast' )
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" ,use_fast=_lowercase ,trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer' )
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])

    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self):
        # Even if the offsets are inconsistent, cut_text should not crash.
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
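# A minimal, self-contained Trie sketch consistent with the behaviour the tests
# above exercise (a simplified stand-in, not the transformers implementation):
# split() cuts the text at the longest added tokens and keeps the rest intact.
class MiniTrie:
    def __init__(self):
        self.data = {}

    def add(self, word):
        node = self.data
        for ch in word:
            node = node.setdefault(ch, {})
        node[''] = 1  # empty key marks the end of a token

    def split(self, text):
        out, start, i = [], 0, 0
        while i < len(text):
            node, j, end = self.data, i, None
            while j < len(text) and text[j] in node:
                node = node[text[j]]
                j += 1
                if '' in node:
                    end = j  # remember the longest terminal match so far
            if end is None:
                i += 1
            else:
                if start < i:
                    out.append(text[start:i])
                out.append(text[i:end])
                start = i = end
        if start < len(text):
            out.append(text[start:])
        return out


# Usage mirroring test_trie_suffix_tokens above:
_mini_trie = MiniTrie()
for _tok in ('AB', 'B', 'C'):
    _mini_trie.add(_tok)
assert _mini_trie.split('ABC') == ['AB', 'C']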
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0,
                 d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64,
                 d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        inputs = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            inputs = lyr(
                inputs, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask)[0]

        decoded = self.decoder_norm(inputs)
        decoded = self.post_dropout(decoded)
        spec_out = self.spec_out(decoded)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate,
                          layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None,
                encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)


class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states


class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1))
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        # T5-style layer norm: scale only, no bias, no mean subtraction.
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Variance is computed in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input):
        # Gaussian Error Linear Unit, tanh approximation.
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        # FiLM: predict a per-channel (scale, shift) pair from the conditioning.
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
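# Added sanity check: a standalone exercise of the FiLM modulation above.
# The shapes are illustrative assumptions, not the pipeline's real dimensions.
_film = TaFiLMLayer(in_features=32, out_features=8)
_y = _film(torch.randn(2, 5, 8), torch.randn(2, 1, 32))
assert _y.shape == (2, 5, 8)  # (scale, shift) broadcast over the sequence axis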
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    '''Return (start, end, sum) of the maximum subarray of arr[low:high + 1] (divide and conquer).'''
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    '''Best sum over subarrays that cross the midpoint, scanning outwards from mid.'''
    left_sum, max_left = float('-inf'), -1
    right_sum, max_right = float('-inf'), -1

    summ: int | float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
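# For contrast, Kadane's O(n) scan -- a standard alternative to the
# divide-and-conquer version above; it returns only the maximum sum.
def kadane(arr: Sequence[float]) -> float:
    best = cur = arr[0]
    for x in arr[1:]:
        cur = max(x, cur + x)  # extend the run or restart at x
        best = max(best, cur)
    return best


assert kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]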
| 703 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
                 hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
                 hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
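# Hedged usage sketch outside the test harness. The checkpoint name comes from
# TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST above; the function is only defined,
# not called, since it downloads model weights.
def _resnet_demo() -> str:
    processor = AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    inputs = processor(images=prepare_img(), return_tensors='tf')
    logits = model(**inputs).logits
    return model.config.id2label[int(tf.argmax(logits, axis=-1)[0])]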
| 16 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_lowerCAmelCase = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
_lowerCAmelCase = {
"roberta-base": 5_1_2,
"roberta-large": 5_1_2,
"roberta-large-mnli": 5_1_2,
"distilroberta-base": 5_1_2,
"roberta-base-openai-detector": 5_1_2,
"roberta-large-openai-detector": 5_1_2,
}
class __UpperCamelCase ( _UpperCAmelCase ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ['''input_ids''', '''attention_mask''']
_UpperCAmelCase = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
                 pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token behave like a normal word (strip the space before it).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
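# Hedged usage sketch (checkpoint name taken from the pretrained maps above;
# defined only, since it downloads weights). For a sequence pair,
# build_inputs_with_special_tokens yields the layout <s> A </s></s> B </s>.
def _roberta_demo() -> list:
    from transformers import RobertaTokenizerFast  # released equivalent of the class above
    tok = RobertaTokenizerFast.from_pretrained('roberta-base')
    ids = tok('Hello', 'world')['input_ids']
    return tok.convert_ids_to_tokens(ids)  # begins with '<s>', ends with '</s>'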
| 704 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    '''Solve matrix * x = vector by Gaussian elimination with partial pivoting.'''
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    col: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    '''Return the least-degree polynomial through (1, y_points[0]), (2, y_points[1]), ...'''
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    '''The generating function u(n) = 1 - n + n^2 - ... + n^10 from Project Euler 101.'''
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    '''Sum of the first incorrect terms (FITs) of the optimum polynomials.'''
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
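# A numpy cross-check of the same idea, using least-squares polynomial fits in
# place of the hand-rolled Gaussian elimination (assumed equivalent here, since
# the Vandermonde systems are exactly determined). For u(n) = n^3 the first
# incorrect terms are 1, 15 and 58, summing to 74 per the problem statement.
import numpy as np


def first_incorrect_term(sequence: list[int], degree: int) -> int:
    xs = np.arange(1, degree + 2)
    coeffs = np.polyfit(xs, sequence[: degree + 1], degree)
    n = degree + 2
    while True:
        predicted = round(float(np.polyval(coeffs, n)))
        if predicted != sequence[n - 1]:
            return predicted
        n += 1


cubic = [n**3 for n in range(1, 12)]
assert sum(first_incorrect_term(cubic, d) for d in range(3)) == 74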
| 16 | 0 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    '''Sieve of Eratosthenes: all primes strictly below max_number.'''
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    '''Count the composites below max_number with exactly two prime factors.'''
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
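# Brute-force cross-check for small bounds (an added, independent sketch).
# Semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
def count_semiprimes_bruteforce(limit: int) -> int:
    def is_prime(n: int) -> bool:
        return n >= 2 and all(n % d for d in range(2, int(n**0.5) + 1))

    count = 0
    for n in range(4, limit):
        for p in range(2, int(n**0.5) + 1):
            if n % p == 0:
                if is_prime(n // p):  # p, the smallest divisor, is always prime
                    count += 1
                break
    return count


assert count_semiprimes_bruteforce(30) == 10 == solution(30)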
| 705 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    '''Check whether cp is the codepoint of a CJK character.'''
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    '''Return 1 if every character of word is a CJK character, else 0.'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    '''Collect the multi-character, fully-Chinese tokens from an LTP segmentation.'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    '''Prefix characters that continue an LTP whole word with "##".'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    '''For every line, compute the positions of "##" Chinese sub-tokens.'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese sub-words starting with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
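# Toy illustration of add_sub_symbol (inputs are made up): characters that
# continue an LTP whole word receive the '##' prefix so whole-word masking
# can treat them as one unit.
assert add_sub_symbol(['你', '好', '世', '界'], {'你好', '世界'}) == ['你', '##好', '世', '##界']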
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 0 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __UpperCamelCase ( enum.Enum ):
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = 2
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self ,*_A ,**_A ):
'''simple docstring'''
super().__init__(*__UpperCamelCase ,**__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_lowerCAmelCase : Dict = None
if self.model.config.prefix is not None:
_lowerCAmelCase : Optional[int] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_lowerCAmelCase : str = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Optional[Any] = self._sanitize_parameters(prefix=__UpperCamelCase ,**self._forward_params )
_lowerCAmelCase : str = {**self._preprocess_params, **preprocess_params}
_lowerCAmelCase : Union[str, Any] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : str = {}
if prefix is not None:
_lowerCAmelCase : Optional[int] = prefix
if prefix:
_lowerCAmelCase : Union[str, Any] = self.tokenizer(
__UpperCamelCase ,padding=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
' [None, \'hole\']' )
_lowerCAmelCase : Optional[Any] = handle_long_generation
preprocess_params.update(__UpperCamelCase )
_lowerCAmelCase : str = generate_kwargs
_lowerCAmelCase : int = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
_lowerCAmelCase : Union[str, Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
_lowerCAmelCase : Optional[Any] = ReturnType.TENSORS
if return_type is not None:
_lowerCAmelCase : Union[str, Any] = return_type
if clean_up_tokenization_spaces is not None:
_lowerCAmelCase : str = clean_up_tokenization_spaces
if stop_sequence is not None:
_lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
_lowerCAmelCase : Dict = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self ,*_A ,**_A ):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*__UpperCamelCase ,**__UpperCamelCase )
def __call__( self ,_A ,**_A ):
'''simple docstring'''
return super().__call__(__UpperCamelCase ,**__UpperCamelCase )
def __lowerCamelCase ( self ,_A ,_A="" ,_A=None ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.tokenizer(
prefix + prompt_text ,padding=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,return_tensors=self.framework )
_lowerCAmelCase : int = prompt_text
if handle_long_generation == "hole":
_lowerCAmelCase : int = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
_lowerCAmelCase : str = generate_kwargs.get('max_length' ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_lowerCAmelCase : List[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
_lowerCAmelCase : List[Any] = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_lowerCAmelCase : Tuple = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self ,_A ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = model_inputs['input_ids']
_lowerCAmelCase : Dict = model_inputs.get('attention_mask' ,__UpperCamelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : int = None
_lowerCAmelCase : Optional[Any] = 1
else:
_lowerCAmelCase : int = input_ids.shape[0]
_lowerCAmelCase : Tuple = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_lowerCAmelCase : List[Any] = generate_kwargs.pop('prefix_length' ,0 )
if prefix_length > 0:
_lowerCAmelCase : Tuple = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_lowerCAmelCase : Union[str, Any] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_lowerCAmelCase : Any = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_lowerCAmelCase : int = self.model.generate(input_ids=__UpperCamelCase ,attention_mask=__UpperCamelCase ,**__UpperCamelCase )
_lowerCAmelCase : str = generated_sequence.shape[0]
if self.framework == "pt":
_lowerCAmelCase : int = generated_sequence.reshape(__UpperCamelCase ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
_lowerCAmelCase : Optional[int] = tf.reshape(__UpperCamelCase ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self ,_A ,_A=ReturnType.FULL_TEXT ,_A=True ):
'''simple docstring'''
_lowerCAmelCase : int = model_outputs['generated_sequence'][0]
_lowerCAmelCase : Union[str, Any] = model_outputs['input_ids']
_lowerCAmelCase : Optional[Any] = model_outputs['prompt_text']
_lowerCAmelCase : Tuple = generated_sequence.numpy().tolist()
_lowerCAmelCase : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_lowerCAmelCase : Tuple = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_lowerCAmelCase : Dict = self.tokenizer.decode(
__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_lowerCAmelCase : List[Any] = 0
else:
_lowerCAmelCase : Tuple = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase ,) )
if return_type == ReturnType.FULL_TEXT:
_lowerCAmelCase : Any = prompt_text + text[prompt_length:]
else:
_lowerCAmelCase : str = text[prompt_length:]
_lowerCAmelCase : str = {'generated_text': all_text}
records.append(__UpperCamelCase )
return records
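# Hedged usage sketch (checkpoint name assumed; any causal LM checkpoint
# works). Defined only, since it downloads model weights:
def _generation_demo() -> str:
    from transformers import pipeline
    generator = pipeline('text-generation', model='gpt2')
    out = generator('Hello, I am', max_new_tokens=20, return_full_text=False)
    return out[0]['generated_text']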
| 706 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
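# Hedged usage sketch mirroring the slow tests above (same public checkpoint;
# defined only, since it needs the weights and ideally a GPU):
def _ldm_demo() -> None:
    pipe = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256')
    pipe = pipe.to('cuda' if torch.cuda.is_available() else 'cpu')
    image = pipe('A painting of a squirrel eating a burger', num_inference_steps=50).images[0]
    image.save('squirrel.png')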
| 16 | 0 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = model.config
_lowerCAmelCase : List[Any] = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
_lowerCAmelCase : List[Any] = MBartConfig(
is_decoder=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , add_cross_attention=_lowerCamelCase , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=_lowerCamelCase , add_final_layer_norm=_lowerCamelCase , )
return encoder_config, decoder_config
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if "encoder.model" in name:
_lowerCAmelCase : int = name.replace('encoder.model' , 'encoder' )
if "decoder.model" in name:
_lowerCAmelCase : Dict = name.replace('decoder.model' , 'decoder' )
if "patch_embed.proj" in name:
_lowerCAmelCase : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowerCAmelCase : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
_lowerCAmelCase : Any = """encoder.""" + name
if "attn.proj" in name:
_lowerCAmelCase : Any = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name and "mask" not in name:
_lowerCAmelCase : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_lowerCAmelCase : Dict = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowerCAmelCase : Union[str, Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_lowerCAmelCase : List[Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowerCAmelCase : Dict = name.replace('mlp.fc2' , 'output.dense' )
if name == "encoder.norm.weight":
_lowerCAmelCase : str = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
_lowerCAmelCase : Dict = """encoder.layernorm.bias"""
return name
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCAmelCase : List[str] = orig_state_dict.pop(_lowerCamelCase )
if "qkv" in key:
_lowerCAmelCase : int = key.split('.' )
_lowerCAmelCase : List[str] = int(key_split[3] )
_lowerCAmelCase : Optional[Any] = int(key_split[5] )
_lowerCAmelCase : str = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_lowerCAmelCase : Union[str, Any] = val[:dim, :]
_lowerCAmelCase : Optional[int] = val[dim : dim * 2, :]
_lowerCAmelCase : Union[str, Any] = val[-dim:, :]
else:
_lowerCAmelCase : List[Any] = val[:dim]
_lowerCAmelCase : Optional[Any] = val[dim : dim * 2]
_lowerCAmelCase : Dict = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_lowerCAmelCase : List[str] = val
return orig_state_dict
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = DonutModel.from_pretrained(_lowerCamelCase ).eval()
# load HuggingFace model
_lowerCAmelCase : List[str] = get_configs(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = DonutSwinModel(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = MBartForCausalLM(_lowerCamelCase )
_lowerCAmelCase : Any = VisionEncoderDecoderModel(encoder=_lowerCamelCase , decoder=_lowerCamelCase )
model.eval()
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : List[Any] = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
# verify results on scanned document
_lowerCAmelCase : str = load_dataset('hf-internal-testing/example-documents' )
_lowerCAmelCase : List[Any] = dataset["""test"""][0]["""image"""].convert('RGB' )
_lowerCAmelCase : List[Any] = XLMRobertaTokenizerFast.from_pretrained(_lowerCamelCase , from_slow=_lowerCamelCase )
_lowerCAmelCase : Tuple = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_lowerCAmelCase : Optional[int] = DonutProcessor(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Tuple = processor(_lowerCamelCase , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_lowerCAmelCase : List[str] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Union[str, Any] = """When is the coffee break?"""
_lowerCAmelCase : List[str] = task_prompt.replace('{user_input}' , _lowerCamelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_lowerCAmelCase : str = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_lowerCAmelCase : str = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_lowerCAmelCase : Any = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_lowerCAmelCase : List[str] = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_lowerCAmelCase : int = """hello world"""
else:
raise ValueError('Model name not supported' )
_lowerCAmelCase : List[Any] = original_model.decoder.tokenizer(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors='pt' )[
"""input_ids"""
]
_lowerCAmelCase : Tuple = original_model.encoder.model.patch_embed(_lowerCamelCase )
_lowerCAmelCase : str = model.encoder.embeddings(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
# verify encoder hidden states
_lowerCAmelCase : Any = original_model.encoder(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = model.encoder(_lowerCamelCase ).last_hidden_state
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-2 )
# verify decoder hidden states
_lowerCAmelCase : int = original_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).logits
_lowerCAmelCase : Dict = model(_lowerCamelCase , decoder_input_ids=_lowerCamelCase ).logits
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
_lowerCAmelCase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
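    # A minimal inference sketch with the converted checkpoint (assumption: it was
    # saved to args.pytorch_dump_folder_path; `image` stands for a hypothetical
    # PIL image, and the prompt mirrors the DocVQA branch above).
    # processor = DonutProcessor.from_pretrained(args.pytorch_dump_folder_path)
    # model = VisionEncoderDecoderModel.from_pretrained(args.pytorch_dump_folder_path)
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    # prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
    # decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
    # outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)
    # print(processor.batch_decode(outputs)[0])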
| 707 |
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
    return baseaa.aaaencode(_lowerCamelCase.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
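    # A minimal round-trip sketch with the standard library's Ascii85 codec, which
    # these wrappers appear to delegate to (assumption: `baseaa.aaaencode` and
    # `baseaa.aaadecode` correspond to `base64.a85encode` / `base64.a85decode`).
    import base64
    encoded = base64.a85encode('Hello World!'.encode('utf-8'))
    decoded = base64.a85decode(encoded).decode('utf-8')
    assert decoded == 'Hello World!'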
| 16 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=4 ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Union[str, Any] = seq_length
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : Optional[Any] = use_attention_mask
_lowerCAmelCase : Any = use_token_type_ids
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : int = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Any = num_choices
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_attention_mask:
_lowerCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : List[Any] = None
if self.use_token_type_ids:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCAmelCase : Tuple = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=UpperCamelCase__ ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCamelCase ( __A , unittest.TestCase ):
_UpperCAmelCase = True
_UpperCAmelCase = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FlaxBertModelTester(self )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = FlaxBertModel.from_pretrained('bert-base-cased' )
_lowerCAmelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase__ )
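if __name__ == "__main__":
    # A minimal sketch of exercising the Flax model outside the test harness
    # (assumption: the "bert-base-cased" checkpoint used by the slow test above;
    # downloads weights on first run).
    from transformers import BertTokenizerFast
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
    model = FlaxBertModel.from_pretrained("bert-base-cased")
    inputs = tokenizer("Hello, Flax!", return_tensors="np")
    outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # (1, sequence_length, 768)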
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
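if __name__ == "__main__":
    # A minimal usage sketch of the fast tokenizer defined above (assumption: the
    # "bert-base-uncased" checkpoint from the pretrained maps; downloads files).
    from transformers import BertTokenizerFast
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoded = tokenizer("How are you?", "I am fine.")
    # token_type_ids are 0 over "[CLS] A [SEP]" and 1 over "B [SEP]", as produced
    # by create_token_type_ids_from_sequences above.
    print(encoded["input_ids"], encoded["token_type_ids"])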
| 16 | 0 |
"""simple docstring"""
def power( base , exponent ):
    '''simple docstring'''
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
    print("""Raise base to the power of exponent using recursion...""")
    base = int(input("""Enter the base: """).strip())
    exponent = int(input("""Enter the exponent: """).strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(F'''{base} to the power of {exponent} is {result}''')
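    # A minimal sketch of exponentiation by squaring, an O(log n) alternative to
    # the linear recursion above; `fast_power` is a hypothetical name, not from
    # the original source.
    def fast_power(base: float, exponent: int) -> float:
        if exponent < 0:
            return 1 / fast_power(base, -exponent)
        result = 1.0
        while exponent:
            if exponent & 1:  # fold in the contribution of the current bit
                result *= base
            base *= base
            exponent >>= 1
        return result
    assert fast_power(2, 10) == 1024
| 709 |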
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
_lowerCAmelCase : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
_lowerCAmelCase : int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
_lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : int = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices.""" )
_lowerCAmelCase : str = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A ,env=os.environ.copy() )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
_lowerCAmelCase : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(_A ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCAmelCase = Accelerator()
_lowerCAmelCase = (accelerator.state.process_index + 2, 1_0)
_lowerCAmelCase = torch.randint(0, 1_0, shape).to(accelerator.device)
_lowerCAmelCase = """"""
_lowerCAmelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCAmelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCAmelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
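    # A minimal follow-on sketch of the padding semantics exercised above
    # (assumption: the script runs under `accelerate launch`, so several
    # processes with different local shapes exist).
    accelerator = Accelerator()
    local = torch.ones(accelerator.process_index + 1, 4, device=accelerator.device)
    # pad_across_processes right-pads dim 0 with pad_index so every rank's tensor
    # reaches the largest size at that dim; gather then concatenates the now
    # equal-shaped tensors across processes.
    padded = accelerator.pad_across_processes(local, dim=0, pad_index=0)
    gathered = accelerator.gather(padded)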
| 16 | 0 |
import requests
from bs4 import BeautifulSoup
def get_citation ( url , params ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url , params=params ).content , 'html.parser' )
    div = soup.find('div' , attrs={'class': 'gs_ri'} )
    anchors = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 3_0,
"""pages""": """3979-3990""",
"""year""": 2_0_1_8,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
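    # An offline sketch of the same parsing logic against a saved snippet
    # (assumption: the markup mirrors the `gs_ri`/`gs_fl` classes used above).
    html = '<div class="gs_ri"><div class="gs_fl"><a>a</a><a>b</a><a>Cited by 42</a></div></div>'
    offline_soup = BeautifulSoup(html, 'html.parser')
    offline_div = offline_soup.find('div', attrs={'class': 'gs_ri'})
    offline_anchors = offline_div.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    assert offline_anchors[2].get_text() == 'Cited by 42'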
| 710 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations( sequence: list ) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence: list , current_sequence: list , index: int , index_used: list , ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
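# A minimal cross-check: the standard library's itertools.permutations yields the
# same n! orderings the backtracking search above prints.
from itertools import permutations
expected = [list(p) for p in permutations(sequence)]
assert len(expected) == 24  # 4! orderings of the 4-element sequence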
| 16 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width ( height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class __UpperCamelCase ( __UpperCAmelCase ):
def __init__( self ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=lowerCAmelCase_ ,scheduler=lowerCAmelCase_ ,movq=lowerCAmelCase_ ,)
_lowerCAmelCase : str = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
if latents is None:
_lowerCAmelCase : Union[str, Any] = randn_tensor(lowerCAmelCase_ ,generator=lowerCAmelCase_ ,device=lowerCAmelCase_ ,dtype=lowerCAmelCase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
_lowerCAmelCase : int = latents.to(lowerCAmelCase_ )
_lowerCAmelCase : List[str] = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self ,_A=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
_lowerCAmelCase : int = torch.device(F"""cuda:{gpu_id}""" )
_lowerCAmelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase_ ,lowerCAmelCase_ )
def __lowerCamelCase ( self ,_A=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
_lowerCAmelCase : Tuple = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' ,silence_dtype_warnings=lowerCAmelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase, _lowerCAmelCase : Tuple = cpu_offload_with_hook(lowerCAmelCase_ ,lowerCAmelCase_ ,prev_module_hook=lowerCAmelCase_ )
# We'll offload the last model manually.
_lowerCAmelCase : Union[str, Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
'''simple docstring'''
if not hasattr(self.unet ,'_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase_ ,'_hf_hook' )
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase_ )
def __call__( self ,_A ,_A ,_A = 512 ,_A = 512 ,_A = 100 ,_A = 4.0 ,_A = 1 ,_A = None ,_A = None ,_A = "pil" ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : int = self._execution_device
_lowerCAmelCase : str = guidance_scale > 1.0
if isinstance(lowerCAmelCase_ ,lowerCAmelCase_ ):
_lowerCAmelCase : Optional[Any] = torch.cat(lowerCAmelCase_ ,dim=0 )
_lowerCAmelCase : Tuple = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowerCAmelCase_ ,lowerCAmelCase_ ):
_lowerCAmelCase : Dict = torch.cat(lowerCAmelCase_ ,dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : Tuple = image_embeds.repeat_interleave(lowerCAmelCase_ ,dim=0 )
_lowerCAmelCase : List[Any] = negative_image_embeds.repeat_interleave(lowerCAmelCase_ ,dim=0 )
_lowerCAmelCase : str = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCAmelCase_ )
self.scheduler.set_timesteps(lowerCAmelCase_ ,device=lowerCAmelCase_ )
_lowerCAmelCase : Dict = self.scheduler.timesteps
_lowerCAmelCase : Union[str, Any] = self.unet.config.in_channels
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = downscale_height_and_width(lowerCAmelCase_ ,lowerCAmelCase_ ,self.movq_scale_factor )
# create initial latent
_lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : List[str] = {'image_embeds': image_embeds}
_lowerCAmelCase : Dict = self.unet(
sample=lowerCAmelCase_ ,timestep=lowerCAmelCase_ ,encoder_hidden_states=lowerCAmelCase_ ,added_cond_kwargs=lowerCAmelCase_ ,return_dict=lowerCAmelCase_ ,)[0]
if do_classifier_free_guidance:
_lowerCAmelCase, _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCAmelCase, _lowerCAmelCase : str = noise_pred.chunk(2 )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = variance_pred.chunk(2 )
_lowerCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Dict = self.scheduler.step(
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,generator=lowerCAmelCase_ ,)[0]
# post-processing
_lowerCAmelCase : Dict = self.movq.decode(lowerCAmelCase_ ,force_not_quantize=lowerCAmelCase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : int = image * 0.5 + 0.5
_lowerCAmelCase : Union[str, Any] = image.clamp(0 ,1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[Any] = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
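# A quick sanity sketch of downscale_height_and_width above: requested sizes are
# mapped to the latent grid (divided by scale_factor**2, rounding up), then
# re-scaled by scale_factor once.
if __name__ == "__main__":
    assert downscale_height_and_width(768, 768, scale_factor=8) == (96, 96)
    assert downscale_height_and_width(100, 100, scale_factor=8) == (16, 16)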
| 711 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class __UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __lowerCamelCase ( self ,_A ,_A ,*_A ,**_A ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
_lowerCAmelCase : Tuple = kwargs.pop('main_process_only' ,_A )
_lowerCAmelCase : Any = kwargs.pop('in_order' ,_A )
if self.isEnabledFor(_A ):
if self._should_log(_A ):
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
elif in_order:
_lowerCAmelCase : str = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.process(_A ,_A )
self.logger.log(_A ,_A ,*_A ,**_A )
state.wait_for_everyone()
def get_logger ( name , log_level = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
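if __name__ == "__main__":
    # A minimal usage sketch (assumption: an Accelerator/PartialState is created
    # before logging, as the adapter above requires).
    from accelerate import Accelerator
    accelerator = Accelerator()
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process only")
    logger.info("printed on every process", main_process_only=False)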
| 16 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __UpperCamelCase ( nn.Module ):
def __init__( self ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = nn.Linear(3 ,4 )
_lowerCAmelCase : Tuple = nn.BatchNormad(4 )
_lowerCAmelCase : List[Any] = nn.Linear(4 ,5 )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(_UpperCAmelCase ) ) )
class __UpperCamelCase ( _UpperCamelCase ):
def __lowerCamelCase ( self ,_A ,*_A ,**_A ):
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class __UpperCamelCase ( _UpperCamelCase ):
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return output + 1
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = ModelForTest()
_lowerCAmelCase : Dict = ModelHook()
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual(test_model._hf_hook ,_UpperCAmelCase )
self.assertTrue(hasattr(_UpperCAmelCase ,'_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ ,'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,['x'] )
remove_hook_from_module(_UpperCAmelCase )
self.assertFalse(hasattr(_UpperCAmelCase ,'_hf_hook' ) )
self.assertFalse(hasattr(_UpperCAmelCase ,'_old_forward' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ModelForTest()
_lowerCAmelCase : List[str] = ModelHook()
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase ,append=_UpperCAmelCase )
self.assertEqual(isinstance(test_model._hf_hook ,_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) ,2 )
self.assertTrue(hasattr(_UpperCAmelCase ,'_old_forward' ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ ,'forward' )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) ,['x'] )
remove_hook_from_module(_UpperCAmelCase )
self.assertFalse(hasattr(_UpperCAmelCase ,'_hf_hook' ) )
self.assertFalse(hasattr(_UpperCAmelCase ,'_old_forward' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = ModelForTest()
_lowerCAmelCase : List[Any] = torch.randn(2 ,3 )
_lowerCAmelCase : Optional[Any] = test_model(x + 1 )
_lowerCAmelCase : Optional[int] = test_model(x + 2 )
_lowerCAmelCase : Optional[int] = PreForwardHook()
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
_lowerCAmelCase : Tuple = test_model(_UpperCAmelCase )
self.assertTrue(torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ,atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
_lowerCAmelCase : Union[str, Any] = PreForwardHook()
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = test_model(_UpperCAmelCase )
self.assertTrue(torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ,atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCAmelCase : List[str] = SequentialHook(PreForwardHook() ,PreForwardHook() )
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
_lowerCAmelCase : List[str] = test_model(_UpperCAmelCase )
assert torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ,atol=1E-5 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ModelForTest()
_lowerCAmelCase : Tuple = torch.randn(2 ,3 )
_lowerCAmelCase : List[Any] = test_model(_UpperCAmelCase )
_lowerCAmelCase : int = PostForwardHook()
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
_lowerCAmelCase : List[str] = test_model(_UpperCAmelCase )
self.assertTrue(torch.allclose(_UpperCAmelCase ,output + 1 ,atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
_lowerCAmelCase : Any = PostForwardHook()
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
_lowerCAmelCase : Dict = test_model(_UpperCAmelCase )
self.assertTrue(torch.allclose(_UpperCAmelCase ,output + 1 ,atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCAmelCase : List[Any] = SequentialHook(PostForwardHook() ,PostForwardHook() )
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
_lowerCAmelCase : List[Any] = test_model(_UpperCAmelCase )
assert torch.allclose(_UpperCAmelCase ,output + 2 ,atol=1E-5 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = ModelForTest()
_lowerCAmelCase : Optional[int] = torch.randn(2 ,3 )
_lowerCAmelCase : int = test_model(_UpperCAmelCase )
_lowerCAmelCase : List[Any] = PostForwardHook()
add_hook_to_module(_UpperCAmelCase ,_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = test_model(_UpperCAmelCase )
self.assertTrue(torch.allclose(_UpperCAmelCase ,output + 1 ) )
self.assertTrue(outputa.requires_grad )
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : List[Any] = test_model(_UpperCAmelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device ,torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device(0 ) )
self.assertEqual(model.lineara.weight.device ,torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_lowerCAmelCase : Dict = torch.randn(2 ,3 )
_lowerCAmelCase : Dict = model(_UpperCAmelCase )
self.assertEqual(output.device ,torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_UpperCAmelCase ,AlignDevicesHook(io_same_device=_UpperCAmelCase ) )
_lowerCAmelCase : Union[str, Any] = torch.randn(2 ,3 ).to(0 )
_lowerCAmelCase : Optional[int] = model(_UpperCAmelCase )
self.assertEqual(output.device ,torch.device(0 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCAmelCase : int = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara ,AlignDevicesHook(**_UpperCAmelCase ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(**_UpperCAmelCase ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(**_UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase : Tuple = torch.device(hook_kwargs['execution_device'] )
self.assertEqual(model.batchnorm.running_mean.device ,_UpperCAmelCase )
_lowerCAmelCase : Tuple = torch.randn(2 ,3 )
_lowerCAmelCase : Optional[int] = model(_UpperCAmelCase )
self.assertEqual(output.device ,_UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
# Now test with buffers included in the offload
_lowerCAmelCase : Optional[Any] = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara ,AlignDevicesHook(**_UpperCAmelCase ) )
add_hook_to_module(model.batchnorm ,AlignDevicesHook(**_UpperCAmelCase ) )
add_hook_to_module(model.lineara ,AlignDevicesHook(**_UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
_lowerCAmelCase : int = torch.randn(2 ,3 )
_lowerCAmelCase : Tuple = model(_UpperCAmelCase )
self.assertEqual(output.device ,_UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCAmelCase : str = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_UpperCAmelCase ,execution_device=_UpperCAmelCase ,offload=_UpperCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase : List[str] = torch.device(_UpperCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device ,_UpperCAmelCase )
_lowerCAmelCase : Dict = torch.randn(2 ,3 )
_lowerCAmelCase : Dict = model(_UpperCAmelCase )
self.assertEqual(output.device ,_UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_UpperCAmelCase )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(_UpperCAmelCase ,execution_device=_UpperCAmelCase ,offload=_UpperCAmelCase ,offload_buffers=_UpperCAmelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
_lowerCAmelCase : Tuple = torch.randn(2 ,3 )
_lowerCAmelCase : str = model(_UpperCAmelCase )
self.assertEqual(output.device ,_UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_UpperCAmelCase )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
# This will move each submodule on different devices
_lowerCAmelCase : List[Any] = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_UpperCAmelCase ,execution_device=_UpperCAmelCase ,offload=_UpperCAmelCase ,weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase : List[Any] = torch.device(_UpperCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device ,_UpperCAmelCase )
_lowerCAmelCase : Dict = torch.randn(2 ,3 )
_lowerCAmelCase : Union[str, Any] = model(_UpperCAmelCase )
self.assertEqual(output.device ,_UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_UpperCAmelCase )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
# Now test with buffers included in the offload
attach_align_device_hook(
_UpperCAmelCase ,execution_device=_UpperCAmelCase ,offload=_UpperCAmelCase ,weights_map=model.state_dict() ,offload_buffers=_UpperCAmelCase ,)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('meta' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('meta' ) )
self.assertEqual(model.batchnorm.running_mean.device ,torch.device('meta' ) )
_lowerCAmelCase : Optional[Any] = torch.randn(2 ,3 )
_lowerCAmelCase : Optional[Any] = model(_UpperCAmelCase )
self.assertEqual(output.device ,_UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_UpperCAmelCase )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.batchnorm.weight.device ,torch.device('cpu' ) )
self.assertEqual(model.lineara.weight.device ,torch.device('cpu' ) )
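if __name__ == "__main__":
    # A minimal standalone sketch of the hook mechanism these tests exercise;
    # `AddOneHook` is a hypothetical hook defined here, not part of accelerate.
    class AddOneHook(ModelHook):
        def pre_forward(self, module, *args, **kwargs):
            return (args[0] + 1,) + args[1:], kwargs

    linear = nn.Linear(3, 3)
    add_hook_to_module(linear, AddOneHook())
    shifted = linear(torch.zeros(1, 3))  # the hook makes the forward see ones
    remove_hook_from_module(linear)
    plain = linear(torch.ones(1, 3))  # same effective input, hook removed
    assert torch.allclose(shifted, plain, atol=1E-5)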
| 712 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
          - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
            sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase : Tuple = sorted(_A ,key=lambda _A : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
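# A standalone sketch of the span selection implemented above, with readable
# names used purely for illustration: every (start, end) pair within
# `max_answer_length` is scored as start_logit + end_logit, candidates are
# sorted by score, and spans nested inside (or enclosing) an already chosen
# span are skipped.
def _demo_best_spans(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (start_index, end_index), _score in scores:
        if any(s <= start_index <= end_index <= e or start_index <= s <= e <= end_index for s, e in chosen):
            continue  # nested inside (or enclosing) a higher-scoring span
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen

assert _demo_best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], max_answer_length=3, top_spans=2) == [(1, 2), (0, 0)]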
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase="pt" ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {'add_prefix_space': True} if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not line.startswith(' ' ) else {}
_lowerCAmelCase : Union[str, Any] = padding_side
return tokenizer(
[line] , max_length=UpperCAmelCase__ , padding='max_length' if pad_to_max_length else None , truncation=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = input_ids.ne(UpperCAmelCase__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
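# A quick sanity check of the column-trimming helper above: columns that are
# padding in every row of the batch are dropped, shrinking the sequence
# length. `0` stands in for the pad token id here.
import torch as _torch_demo

_ids = _torch_demo.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
_keep = _ids.ne(0).any(dim=0)
assert _ids[:, _keep].tolist() == [[5, 6], [7, 0]]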
class __UpperCamelCase ( _a ):
def __init__( self ,_A ,_A ,_A ,_A ,_A="train" ,_A=None ,_A=None ,_A=None ,_A="" ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Path(_A ).joinpath(type_path + '.source' )
_lowerCAmelCase : Union[str, Any] = Path(_A ).joinpath(type_path + '.target' )
_lowerCAmelCase : Optional[int] = self.get_char_lens(self.src_file )
_lowerCAmelCase : Union[str, Any] = max_source_length
_lowerCAmelCase : Optional[int] = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_lowerCAmelCase : Dict = tokenizer
_lowerCAmelCase : List[Any] = prefix
if n_obs is not None:
_lowerCAmelCase : Union[str, Any] = self.src_lens[:n_obs]
_lowerCAmelCase : Tuple = src_lang
_lowerCAmelCase : List[str] = tgt_lang
def __len__( self ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = index + 1 # linecache starts at 1
_lowerCAmelCase : Tuple = self.prefix + linecache.getline(str(self.src_file ) ,_A ).rstrip('\n' )
_lowerCAmelCase : Tuple = linecache.getline(str(self.tgt_file ) ,_A ).rstrip('\n' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_A ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowerCAmelCase : Tuple = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_A ) else self.tokenizer
)
_lowerCAmelCase : Optional[Any] = self.tokenizer.generator if isinstance(self.tokenizer ,_A ) else self.tokenizer
_lowerCAmelCase : Optional[int] = encode_line(_A ,_A ,self.max_source_length ,'right' )
_lowerCAmelCase : Tuple = encode_line(_A ,_A ,self.max_target_length ,'right' )
_lowerCAmelCase : Optional[int] = source_inputs['input_ids'].squeeze()
_lowerCAmelCase : Optional[Any] = target_inputs['input_ids'].squeeze()
_lowerCAmelCase : Union[str, Any] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
return [len(_A ) for x in Path(_A ).open().readlines()]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch.stack([x['input_ids'] for x in batch] )
_lowerCAmelCase : List[str] = torch.stack([x['attention_mask'] for x in batch] )
_lowerCAmelCase : Tuple = torch.stack([x['decoder_input_ids'] for x in batch] )
_lowerCAmelCase : Optional[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_A )
else self.tokenizer.pad_token_id
)
_lowerCAmelCase : List[str] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_A )
else self.tokenizer.pad_token_id
)
_lowerCAmelCase : Any = trim_batch(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Tuple = trim_batch(_A ,_A ,attention_mask=_A )
_lowerCAmelCase : Any = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_lowerCAmelCase = getLogger(__name__)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return list(itertools.chain.from_iterable(UpperCAmelCase__ ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = get_git_info()
save_json(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , 'git_log.json' ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=4 , **_lowerCamelCase ):
'''simple docstring'''
with open(UpperCAmelCase__ , 'w' ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , indent=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(UpperCAmelCase__ ) as f:
return json.load(UpperCAmelCase__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Any = git.Repo(search_parent_directories=UpperCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = {
'repo_id': str(UpperCAmelCase__ ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return list(map(UpperCAmelCase__ , UpperCAmelCase__ ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with open(UpperCAmelCase__ , 'wb' ) as f:
return pickle.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def remove_articles(_lowerCamelCase ):
return re.sub(R'\b(a|an|the)\b' , ' ' , UpperCAmelCase__ )
def white_space_fix(_lowerCamelCase ):
return " ".join(text.split() )
def remove_punc(_lowerCamelCase ):
_lowerCAmelCase : List[str] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase__ ) ) ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = normalize_answer(UpperCAmelCase__ ).split()
_lowerCAmelCase : Tuple = normalize_answer(UpperCAmelCase__ ).split()
_lowerCAmelCase : Tuple = Counter(UpperCAmelCase__ ) & Counter(UpperCAmelCase__ )
_lowerCAmelCase : str = sum(common.values() )
if num_same == 0:
return 0
_lowerCAmelCase : Tuple = 1.0 * num_same / len(UpperCAmelCase__ )
_lowerCAmelCase : str = 1.0 * num_same / len(UpperCAmelCase__ )
_lowerCAmelCase : List[str] = (2 * precision * recall) / (precision + recall)
return fa
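# Worked example of the token-level F1 above, with the normalization inlined:
# after lowercasing and dropping punctuation and articles, "The cat sat." and
# "A cat sat down" reduce to ["cat", "sat"] and ["cat", "sat", "down"]. The
# overlap is 2 tokens, so precision = 2/2, recall = 2/3 and F1 = 0.8.
from collections import Counter as _Counter

_pred, _gold = ['cat', 'sat'], ['cat', 'sat', 'down']
_num_same = sum((_Counter(_pred) & _Counter(_gold)).values())
_precision, _recall = _num_same / len(_pred), _num_same / len(_gold)
assert abs((2 * _precision * _recall) / (_precision + _recall) - 0.8) < 1e-9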
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return normalize_answer(UpperCAmelCase__ ) == normalize_answer(UpperCAmelCase__ )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
_lowerCAmelCase : str = 0
for hypo, pred in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
em += exact_match_score(UpperCAmelCase__ , UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
em /= len(UpperCAmelCase__ )
return {"em": em}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_lowerCAmelCase : Tuple = 'dropout_rate'
for p in extra_params:
if getattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if not hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) and not hasattr(UpperCAmelCase__ , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(UpperCAmelCase__ ) )
delattr(UpperCAmelCase__ , UpperCAmelCase__ )
continue
_lowerCAmelCase : str = p if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) else equivalent_param[p]
setattr(UpperCAmelCase__ , UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
delattr(UpperCAmelCase__ , UpperCAmelCase__ )
return hparams, config
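# A minimal sketch of the remapping above using throwaway namespaces: a
# `dropout` value on the hyperparameter side lands on the config's
# `dropout_rate`, while params the config does not know are simply dropped.
from types import SimpleNamespace as _NS

_demo_hparams = _NS(dropout=0.1, unknown_param=42)
_demo_config = _NS(dropout_rate=0.0)
for _name, _target in [('dropout', 'dropout_rate'), ('unknown_param', 'unknown_param')]:
    _value = getattr(_demo_hparams, _name, None)
    if _value is not None and hasattr(_demo_config, _target):
        setattr(_demo_config, _target, _value)
    delattr(_demo_hparams, _name)
assert _demo_config.dropout_rate == 0.1 and not hasattr(_demo_hparams, 'unknown_param')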
| 713 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
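# A minimal usage sketch mirroring the slow test above. It assumes the
# `harmonai/maestro-150k` checkpoint can be downloaded and that a CUDA device
# is available, so treat it as illustrative rather than something run in CI.
def _demo_dance_diffusion():
    import torch
    from diffusers import DanceDiffusionPipeline

    pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k').to('cuda')
    output = pipe(generator=torch.manual_seed(0), num_inference_steps=100, audio_length_in_s=4.096)
    return output.audios  # shape: (batch, channels, sample_size)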
| 16 | 0 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __UpperCamelCase ( a__ ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : int = 'pt'
_lowerCAmelCase : Dict = 'tf'
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__UpperCamelCase )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = TFAutoModel.from_pretrained(self.test_model ,from_pt=__UpperCamelCase )
model_tf.save_pretrained(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'mock_framework'
# Framework provided - return whatever the user provides
_lowerCAmelCase : int = FeaturesManager.determine_framework(self.test_model ,__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,__UpperCamelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCamelCase )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(__UpperCamelCase ,__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,__UpperCamelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCamelCase )
_lowerCAmelCase : Any = FeaturesManager.determine_framework(__UpperCamelCase ,__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCamelCase )
_lowerCAmelCase : Any = FeaturesManager.determine_framework(__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCamelCase )
_lowerCAmelCase : int = FeaturesManager.determine_framework(__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__UpperCamelCase ):
_lowerCAmelCase : Optional[Any] = FeaturesManager.determine_framework(__UpperCamelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = MagicMock(return_value=__UpperCamelCase )
with patch('transformers.onnx.features.is_tf_available' ,__UpperCamelCase ):
_lowerCAmelCase : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase ,self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : Optional[int] = MagicMock(return_value=__UpperCamelCase )
with patch('transformers.onnx.features.is_torch_available' ,__UpperCamelCase ):
_lowerCAmelCase : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase ,self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase : Tuple = MagicMock(return_value=__UpperCamelCase )
_lowerCAmelCase : str = MagicMock(return_value=__UpperCamelCase )
with patch('transformers.onnx.features.is_tf_available' ,__UpperCamelCase ), patch(
'transformers.onnx.features.is_torch_available' ,__UpperCamelCase ):
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCamelCase ,self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase : int = MagicMock(return_value=__UpperCamelCase )
_lowerCAmelCase : Any = MagicMock(return_value=__UpperCamelCase )
with patch('transformers.onnx.features.is_tf_available' ,__UpperCamelCase ), patch(
'transformers.onnx.features.is_torch_available' ,__UpperCamelCase ):
with self.assertRaises(__UpperCamelCase ):
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(self.test_model )
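# A condensed, illustrative sketch of the resolution order the tests above
# exercise; the real logic lives in `FeaturesManager.determine_framework`.
def _demo_resolve_framework(explicit=None, local_ckpt=None, has_torch=True, has_tf=True):
    if explicit is not None:
        return explicit  # a user-provided framework always wins
    if local_ckpt in ('pt', 'tf'):
        return local_ckpt  # otherwise, infer from the checkpoint found on disk
    if has_torch:
        return 'pt'  # PyTorch is preferred when both frameworks are installed
    if has_tf:
        return 'tf'
    raise EnvironmentError('Neither PyTorch nor TensorFlow is available.')

assert _demo_resolve_framework(explicit='tf') == 'tf'
assert _demo_resolve_framework(local_ckpt='pt', has_torch=False) == 'pt'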
| 714 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
for t in range(_A ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(_A ,_A )
_lowerCAmelCase : Union[str, Any] = scheduler.step(_A ,_A ,_A ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,_A )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**_A )
_lowerCAmelCase : Any = self.dummy_sample
_lowerCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(_A ,'set_timesteps' ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A ,'set_timesteps' ):
_lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
_lowerCAmelCase : List[str] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
_lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=_A )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A ,prediction_type=_A ,sample_max_value=_A ,solver_order=_A ,solver_type=_A ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
_lowerCAmelCase : List[Any] = self.full_loop(
solver_order=_A ,solver_type=_A ,prediction_type=_A ,)
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_A ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
_lowerCAmelCase : Tuple = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(thresholding=_A ,dynamic_thresholding_ratio=0 )
_lowerCAmelCase : Tuple = scheduler_class(**_A )
_lowerCAmelCase : Optional[Any] = 10
_lowerCAmelCase : Union[str, Any] = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Tuple = model(_A ,_A )
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
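# A short usage sketch of the configuration exercised above: the constructor
# kwargs mirror `get_scheduler_config` and the loop mirrors `full_loop`. The
# `model` and `sample` arguments are placeholders supplied by the caller.
def _demo_unipc_loop(model, sample, num_inference_steps=10):
    from diffusers import UniPCMultistepScheduler

    scheduler = UniPCMultistepScheduler(
        num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02,
        beta_schedule='linear', solver_order=2, solver_type='bh2',
    )
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample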
| 16 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase = logging.get_logger(__name__)
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["""pixel_values"""]
def __init__( self ,_A = True ,_A = None ,_A = PILImageResampling.BILINEAR ,_A = True ,_A = None ,_A = True ,_A = 1 / 255 ,_A = True ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
super().__init__(**_A )
_lowerCAmelCase : Optional[Any] = size if size is not None else {'shortest_edge': 256}
_lowerCAmelCase : int = get_size_dict(_A ,default_to_square=_A )
_lowerCAmelCase : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase : int = get_size_dict(_A )
_lowerCAmelCase : List[str] = do_resize
_lowerCAmelCase : Optional[Any] = size
_lowerCAmelCase : Union[str, Any] = resample
_lowerCAmelCase : Dict = do_center_crop
_lowerCAmelCase : str = crop_size
_lowerCAmelCase : Dict = do_rescale
_lowerCAmelCase : Dict = rescale_factor
_lowerCAmelCase : Optional[int] = do_normalize
_lowerCAmelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCamelCase ( self ,_A ,_A ,_A = PILImageResampling.BICUBIC ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = get_size_dict(_A ,default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_lowerCAmelCase : str = get_resize_output_image_size(_A ,size=size['shortest_edge'] ,default_to_square=_A )
return resize(_A ,size=_A ,resample=_A ,data_format=_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = get_size_dict(_A )
return center_crop(_A ,size=(size['height'], size['width']) ,data_format=_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = None ,**_A ):
'''simple docstring'''
return rescale(_A ,scale=_A ,data_format=_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,**_A ,):
'''simple docstring'''
return normalize(_A ,mean=_A ,std=_A ,data_format=_A ,**_A )
def __lowerCamelCase ( self ,_A ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = None ,_A = ChannelDimension.FIRST ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Optional[Any] = size if size is not None else self.size
_lowerCAmelCase : List[Any] = get_size_dict(_A ,default_to_square=_A )
_lowerCAmelCase : List[str] = resample if resample is not None else self.resample
_lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : str = get_size_dict(_A )
_lowerCAmelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Union[str, Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase : Any = [to_numpy_array(_A ) for image in images]
if do_resize:
_lowerCAmelCase : str = [self.resize(image=_A ,size=_A ,resample=_A ) for image in images]
if do_center_crop:
_lowerCAmelCase : Union[str, Any] = [self.center_crop(image=_A ,size=_A ) for image in images]
if do_rescale:
_lowerCAmelCase : List[str] = [self.rescale(image=_A ,scale=_A ) for image in images]
if do_normalize:
_lowerCAmelCase : Union[str, Any] = [self.normalize(image=_A ,mean=_A ,std=_A ) for image in images]
_lowerCAmelCase : str = [to_channel_dimension_format(_A ,_A ) for image in images]
_lowerCAmelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_A ,tensor_type=_A )
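# A numpy-only illustration of the tail of the pipeline above (rescale, then
# normalize, on channel-first arrays); resize and crop are skipped, and the
# 0.5 constants stand in for the ImageNet-standard mean/std imported above.
import numpy as _np_demo

_image = _np_demo.full((3, 224, 224), 127.5, dtype=_np_demo.float32)
_rescaled = _image * (1 / 255)
_mean = _np_demo.full((3, 1, 1), 0.5, dtype=_np_demo.float32)
_std = _np_demo.full((3, 1, 1), 0.5, dtype=_np_demo.float32)
_normalized = (_rescaled - _mean) / _std
assert _np_demo.allclose(_normalized, 0.0, atol=1e-6)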
| 715 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle mismatched shapes
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
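# Worked example of the first encoder rewrite rule above: for the key
# `encoders.0.level_blocks.1.model.2.3.weight` the downsample block index is
# 2 * 2 + 3 = 7, so the key becomes
# `encoders.0.level_blocks.1.downsample_block.7.weight`.
import re as _re_demo

_pattern = _re_demo.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
_groups = _pattern.fullmatch('encoders.0.level_blocks.1.model.2.3.weight').groups()
_block_index = int(_groups[2]) * 2 + int(_groups[3])
_new_key = f"""encoders.{_groups[0]}.level_blocks.{_groups[1]}.downsample_block.{_block_index}.{_groups[-1]}"""
assert _new_key == 'encoders.0.level_blocks.1.downsample_block.7.weight'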
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" , allow_redirects=_lowerCamelCase )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=_lowerCamelCase )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
_lowerCAmelCase : Tuple = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 16 | 0 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase = logging.getLogger(__name__)
_lowerCAmelCase = """Hello world! cécé herlolip"""
_lowerCAmelCase = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = BertAbsConfig(
temp_dir='.' , finetune_bert=_lowerCamelCase , large=_lowerCamelCase , share_emb=_lowerCamelCase , use_bert_emb=_lowerCamelCase , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
_lowerCAmelCase : Optional[Any] = torch.load(_lowerCamelCase , lambda _lowerCamelCase , _lowerCamelCase : storage )
_lowerCAmelCase : Any = AbsSummarizer(_lowerCamelCase , torch.device('cpu' ) , _lowerCamelCase )
original.eval()
_lowerCAmelCase : Tuple = BertAbsSummarizer(_lowerCamelCase , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
_lowerCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
_lowerCAmelCase : Any = tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_lowerCamelCase )) )
_lowerCAmelCase : Optional[int] = torch.tensor(_lowerCamelCase ).unsqueeze(0 )
_lowerCAmelCase : Any = tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_lowerCamelCase )) )
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_lowerCAmelCase : int = encoder_input_ids
_lowerCAmelCase : Any = decoder_input_ids
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Dict = None
_lowerCAmelCase : int = None
_lowerCAmelCase : Any = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_lowerCAmelCase : Tuple = original(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )[0]
_lowerCAmelCase : Tuple = original.generator(_lowerCamelCase )
_lowerCAmelCase : List[Any] = new_model(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )[0]
_lowerCAmelCase : Any = new_model.generator(_lowerCamelCase )
_lowerCAmelCase : List[str] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between model outputs: {:.2f}'.format(_lowerCamelCase ) )
    _lowerCAmelCase : str = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
_lowerCAmelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
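# The comparison pattern used above, in isolation: report the maximum absolute
# difference between two tensors and gate on `torch.allclose` with the same
# 1e-3 tolerance. The values below are made up.
import torch as _torch_cmp

_a = _torch_cmp.zeros(4)
_b = _a + 5e-4
assert _torch_cmp.max(_torch_cmp.abs(_a - _b)).item() < 1e-3
assert _torch_cmp.allclose(_a, _b, atol=1e-3)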
| 716 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
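# A worked example of the slicing trick in the extraction helper above: the
# page script assigns a JS object, so slicing from the first `{"config"` up to
# (but not including) the trailing semicolon leaves plain JSON. The payload
# below is synthetic.
import json as _json_demo

_payload = 'window._sharedData = {"config": {}, "entry_data": {"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}};'
_info = _json_demo.loads(_payload[_payload.find('{"config"') : -1])
assert _info['entry_data']['ProfilePage'][0]['graphql']['user']['username'] == 'github'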
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = requests.get(self.url ,headers=_A ).text
_lowerCAmelCase : Optional[Any] = BeautifulSoup(_A ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
_lowerCAmelCase : Tuple = InstagramUser(_lowerCamelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _lowerCamelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 16 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
_UpperCAmelCase = AltDiffusionPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Tuple = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
_lowerCAmelCase : List[str] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
_lowerCAmelCase : List[Any] = CLIPTextModel(lowerCamelCase__ )
_lowerCAmelCase : List[Any] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
_lowerCAmelCase : Tuple = 77
_lowerCAmelCase : Dict = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(lowerCamelCase__ ).startswith('mps' ):
_lowerCAmelCase : Optional[int] = torch.manual_seed(lowerCamelCase__ )
else:
_lowerCAmelCase : Dict = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_lowerCAmelCase : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Optional[int] = self.get_dummy_components()
torch.manual_seed(0 )
_lowerCAmelCase : str = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
_lowerCAmelCase : int = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
_lowerCAmelCase : Any = text_encoder
_lowerCAmelCase : Dict = AltDiffusionPipeline(**lowerCamelCase__ )
_lowerCAmelCase : Any = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase : Dict = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCAmelCase : Optional[int] = '''A photo of an astronaut'''
_lowerCAmelCase : int = alt_pipe(**lowerCamelCase__ )
_lowerCAmelCase : Tuple = output.images
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[Any] = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Optional[Any] = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
_lowerCAmelCase : int = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
_lowerCAmelCase : str = text_encoder
_lowerCAmelCase : str = AltDiffusionPipeline(**lowerCamelCase__ )
_lowerCAmelCase : Optional[Any] = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(lowerCamelCase__ )
_lowerCAmelCase : Tuple = alt_pipe(**lowerCamelCase__ )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : List[Any] = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' ,safety_checker=lowerCamelCase__ )
_lowerCAmelCase : Union[str, Any] = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase : Any = '''A painting of a squirrel eating a burger'''
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = alt_pipe([prompt] ,generator=lowerCamelCase__ ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type='np' )
_lowerCAmelCase : Union[str, Any] = output.images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : int = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' ,subfolder='scheduler' )
_lowerCAmelCase : Optional[int] = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ )
_lowerCAmelCase : Dict = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase : Optional[int] = '''A painting of a squirrel eating a burger'''
_lowerCAmelCase : Dict = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = alt_pipe([prompt] ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type='numpy' )
_lowerCAmelCase : Optional[int] = output.images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : str = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
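# --- Illustrative usage (not part of the test suite above) --------------------
# A minimal end-to-end sketch of the pipeline these tests exercise. The
# checkpoint id and call arguments are taken from the slow tests; wrapping the
# code in a helper function keeps it from running at import time.
def _example_alt_diffusion_inference():
    import torch
    from diffusers import AltDiffusionPipeline

    pipe = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion', safety_checker=None)
    pipe = pipe.to('cuda')  # requires a GPU; use 'cpu' otherwise
    generator = torch.manual_seed(0)
    output = pipe(
        'A painting of a squirrel eating a burger',
        generator=generator,
        guidance_scale=6.0,
        num_inference_steps=20,
        output_type='np',
    )
    return output.images[0]  # (512, 512, 3) numpy array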
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(SPIECE_UNDERLINE ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
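# --- Illustrative usage --------------------------------------------------------
# A short sketch of the tokenizer above in action (it mirrors the upstream
# XLNetTokenizer, so that class is assumed to be the public entry point). Note
# two behaviors wired in above: padding happens on the left, and a single
# sequence is encoded as tokens + [<sep>] + [<cls>] rather than the BERT-style
# leading [CLS].
def _example_xlnet_tokenizer():
    from transformers import XLNetTokenizer

    tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
    encoded = tokenizer('Hello world!')
    return tokenizer.convert_ids_to_tokens(encoded['input_ids'])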
| 16 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class __UpperCamelCase :
def __init__( self ,_A ,_A ,_A ,_A=None ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = start
_lowerCAmelCase : int = end
_lowerCAmelCase : Optional[Any] = val
_lowerCAmelCase : Optional[int] = (start + end) // 2
_lowerCAmelCase : Optional[Any] = left
_lowerCAmelCase : Optional[Any] = right
def __repr__( self ):
'''simple docstring'''
return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class __UpperCamelCase :
def __init__( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = collection
_lowerCAmelCase : Any = function
if self.collection:
_lowerCAmelCase : List[str] = self._build_tree(0 ,len(_A ) - 1 )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
self._update_tree(self.root ,_A ,_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self._query_range(self.root ,_A ,_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(_A ,_A ,self.collection[start] )
_lowerCAmelCase : str = (start + end) // 2
_lowerCAmelCase : Optional[int] = self._build_tree(_A ,_A )
_lowerCAmelCase : Dict = self._build_tree(mid + 1 ,_A )
return SegmentTreeNode(_A ,_A ,self.fn(left.val ,right.val ) ,_A ,_A )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
if node.start == i and node.end == i:
_lowerCAmelCase : Union[str, Any] = val
return
if i <= node.mid:
self._update_tree(node.left ,_A ,_A )
else:
self._update_tree(node.right ,_A ,_A )
_lowerCAmelCase : List[Any] = self.fn(node.left.val ,node.right.val )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left ,_A ,_A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left ,_A ,node.mid ) ,self._query_range(node.right ,node.mid + 1 ,_A ) ,)
else:
# range in right child tree
return self._query_range(node.right ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.root is not None:
_lowerCAmelCase : Union[str, Any] = Queue()
queue.put(self.root )
while not queue.empty():
_lowerCAmelCase : List[str] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 5_0)
_lowerCAmelCase = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
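# Complexity notes for the tree above: _build_tree touches every node once, so
# construction is O(n); update and query_range each follow root-to-leaf paths,
# so both run in O(log n). Any associative binary function works as `fn` -- a
# small sketch beyond the demo above:
#
#     st = SegmentTree(["a", "b", "c"], lambda x, y: x + y)
#     st.query_range(0, 2)   # "abc" -- concatenation over the whole range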
| 718 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,block ) )
# add 48 0-ed integers
words += [0] * 48
a, b, c, d, e, f, g, h = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
h, g, f, e, d, c, b, a = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
msg = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(msg ).hash ,hashlib.sha256(msg ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
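# --- Illustrative usage --------------------------------------------------------
# Cross-check sketch: the pure-Python digest above should agree with hashlib's
# SHA-256 for any input (this is exactly what the unit test above asserts).
# The helper function name is ours; the class name SHAaaa is kept as defined.
def _example_sha256_crosscheck():
    import hashlib

    message = b'abc'
    ours = SHAaaa(message).hash
    reference = hashlib.sha256(message).hexdigest()
    assert ours == reference
    return ours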
| 16 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
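# The try/except guard above is the standard diffusers pattern for optional
# dependencies: when torch or transformers is missing, dummy placeholder
# objects are imported instead, and they raise an informative error only when
# actually used, so `import diffusers` itself never fails. A two-stage usage
# sketch (the checkpoint ids follow the public kandinsky-community naming and
# are assumptions here):
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#     prior_out = prior("a red cat")
#     image = pipe(
#         "a red cat",
#         image_embeds=prior_out.image_embeds,
#         negative_image_embeds=prior_out.negative_image_embeds,
#     ).images[0]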
| 719 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_lowerCAmelCase : Union[str, Any] = key or (lambda x : x)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
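# --- Illustrative usage --------------------------------------------------------
# The obfuscation collapses every method above to __lowerCamelCase, so the
# method names in this sketch (insert_item / get_top / extract_top /
# update_item / delete_item) come from the upstream generic-Heap API this file
# mirrors and are assumptions. Upstream, the default ordering is a max-heap on
# the item's value; pass key=lambda x: -x for min-heap behavior.
#
#     h = Heap()
#     h.insert_item(5, 34)
#     h.insert_item(6, 31)
#     h.insert_item(7, 37)
#     h.get_top()            # [7, 37] -- largest value on top
#     h.extract_top()        # removes and returns [7, 37]
#     h.update_item(6, 40)   # re-key item 6; it bubbles back to the top
#     h.delete_item(5)       # O(log n) removal, located through pos_map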
| 16 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def update_area_of_max_square(_lowerCamelCase , _lowerCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
_lowerCAmelCase : Tuple = update_area_of_max_square(lowerCamelCase__ , col + 1 )
_lowerCAmelCase : Optional[int] = update_area_of_max_square(row + 1 , col + 1 )
_lowerCAmelCase : List[str] = update_area_of_max_square(row + 1 , lowerCamelCase__ )
if mat[row][col]:
_lowerCAmelCase : Optional[Any] = 1 + min([right, diagonal, down] )
_lowerCAmelCase : str = max(largest_square_area[0] , lowerCamelCase__ )
return sub_problem_sol
else:
return 0
_lowerCAmelCase : Optional[Any] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def update_area_of_max_square_using_dp_array(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
_lowerCAmelCase : List[str] = update_area_of_max_square_using_dp_array(lowerCamelCase__ , col + 1 , lowerCamelCase__ )
_lowerCAmelCase : Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowerCamelCase__ )
_lowerCAmelCase : str = update_area_of_max_square_using_dp_array(row + 1 , lowerCamelCase__ , lowerCamelCase__ )
if mat[row][col]:
_lowerCAmelCase : str = 1 + min([right, diagonal, down] )
_lowerCAmelCase : Any = max(largest_square_area[0] , lowerCamelCase__ )
_lowerCAmelCase : Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
_lowerCAmelCase : Dict = [0]
_lowerCAmelCase : Any = [[-1] * cols for _ in range(lowerCamelCase__ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowerCamelCase__ )
return largest_square_area[0]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = [[0] * (cols + 1) for _ in range(rows + 1 )]
_lowerCAmelCase : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
_lowerCAmelCase : Optional[Any] = dp_array[row][col + 1]
_lowerCAmelCase : List[Any] = dp_array[row + 1][col + 1]
_lowerCAmelCase : int = dp_array[row + 1][col]
if mat[row][col] == 1:
_lowerCAmelCase : List[Any] = 1 + min(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCAmelCase : Union[str, Any] = max(dp_array[row][col] , lowerCamelCase__ )
else:
_lowerCAmelCase : List[str] = 0
return largest_square_area
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = [0] * (cols + 1)
_lowerCAmelCase : str = [0] * (cols + 1)
_lowerCAmelCase : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
_lowerCAmelCase : Any = current_row[col + 1]
_lowerCAmelCase : Optional[Any] = next_row[col + 1]
_lowerCAmelCase : Tuple = next_row[col]
if mat[row][col] == 1:
_lowerCAmelCase : Dict = 1 + min(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_lowerCAmelCase : Any = max(current_row[col] , lowerCamelCase__ )
else:
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
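# The four variants above compute the same quantity at different costs:
#   1. plain recursion                    -- exponential time, no caching
#   2. recursion + memo table (dp_array)  -- O(rows * cols) time and space
#   3. bottom-up table                    -- O(rows * cols) time and space
#   4. rolling two-row variant            -- O(rows * cols) time, O(cols) space
# Despite the "area" in the names, the functions track the *side length* of the
# largest all-ones square: the demo call above prints 2 because the whole 2x2
# matrix of ones is itself the largest square.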
| 720 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
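# --- Illustrative usage --------------------------------------------------------
# A shape-level sketch of a forward pass (upstream this model is diffusers'
# PriorTransformer; the keyword names below follow that public signature and
# are assumptions, since the obfuscation renames every parameter to _A):
#
#     import torch
#     from diffusers.models import PriorTransformer
#
#     prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4,
#                              num_layers=2, embedding_dim=8,
#                              num_embeddings=7, additional_embeddings=4)
#     hidden_states = torch.randn(1, 8)            # noisy CLIP image embedding
#     proj_embedding = torch.randn(1, 8)           # CLIP text embedding
#     encoder_hidden_states = torch.randn(1, 7, 8)
#     out = prior(hidden_states, timestep=1, proj_embedding=proj_embedding,
#                 encoder_hidden_states=encoder_hidden_states)
#     out.predicted_image_embedding.shape          # torch.Size([1, 8])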
| 16 | 0 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowerCAmelCase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)
_lowerCAmelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_lowerCAmelCase = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_lowerCAmelCase = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = None
# source code of `config_class`
_lowerCAmelCase : List[Any] = inspect.getsource(_lowerCamelCase )
_lowerCAmelCase : Any = _re_checkpoint.findall(_lowerCamelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
_lowerCAmelCase : Dict = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
_lowerCAmelCase : Optional[int] = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
_lowerCAmelCase : Tuple = ckpt_name
break
return checkpoint
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
_lowerCAmelCase : str = get_checkpoint_from_config_class(_lowerCamelCase )
_lowerCAmelCase : int = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : List[str] = '\n'.join(sorted(_lowerCamelCase ) )
raise ValueError(f"""The following configurations don\'t contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
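# Worked example of the checkpoint check above. For a config docstring
# containing
#     [bert-base-uncased](https://huggingface.co/bert-base-uncased)
# the regex yields [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")],
# and the entry is accepted because the link rebuilt from the checkpoint name
# equals the link found in the docstring (after stripping a trailing "/").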
| 721 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,Device ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,_A ) and column:
if all(
isinstance(x ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(None )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
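# This formatter is normally reached through the `datasets` formatting API
# rather than instantiated directly -- a sketch (format_row / format_column /
# format_batch are the upstream names of the three extraction hooks above):
#
#     import jax
#     from datasets import load_dataset
#
#     ds = load_dataset("mnist", split="train").with_format(
#         "jax", device=str(jax.devices()[0])
#     )
#     ds[0]            # one row as a dict of jax.Array values
#     ds["image"][0]   # columns are stacked when shapes and dtypes line up
#
# `device` and `jnp_array_kwargs` are forwarded to __init__ as format kwargs.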
| 16 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = """MobileNetV1Config"""
# Base docstring
_lowerCAmelCase = """google/mobilenet_v1_1.0_224"""
_lowerCAmelCase = [1, 1_0_2_4, 7, 7]
# Image classification docstring
_lowerCAmelCase = """google/mobilenet_v1_1.0_224"""
_lowerCAmelCase = """tabby, tabby cat"""
_lowerCAmelCase = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Tuple = model.mobilenet_va
else:
_lowerCAmelCase : Dict = model
_lowerCAmelCase : Optional[int] = 'MobilenetV1/Conv2d_0/'
_lowerCAmelCase : Optional[int] = backbone.conv_stem.convolution.weight
_lowerCAmelCase : Any = backbone.conv_stem.normalization.bias
_lowerCAmelCase : List[Any] = backbone.conv_stem.normalization.weight
_lowerCAmelCase : int = backbone.conv_stem.normalization.running_mean
_lowerCAmelCase : Union[str, Any] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_lowerCAmelCase : int = i + 1
_lowerCAmelCase : Optional[Any] = i * 2
_lowerCAmelCase : Optional[Any] = backbone.layer[pt_index]
_lowerCAmelCase : int = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
_lowerCAmelCase : str = pointer.convolution.weight
_lowerCAmelCase : Optional[Any] = pointer.normalization.bias
_lowerCAmelCase : Union[str, Any] = pointer.normalization.weight
_lowerCAmelCase : int = pointer.normalization.running_mean
_lowerCAmelCase : int = pointer.normalization.running_var
_lowerCAmelCase : Optional[int] = backbone.layer[pt_index + 1]
_lowerCAmelCase : str = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
_lowerCAmelCase : List[Any] = pointer.convolution.weight
_lowerCAmelCase : List[Any] = pointer.normalization.bias
_lowerCAmelCase : str = pointer.normalization.weight
_lowerCAmelCase : Optional[Any] = pointer.normalization.running_mean
_lowerCAmelCase : Optional[int] = pointer.normalization.running_var
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
_lowerCAmelCase : Tuple = model.classifier.weight
_lowerCAmelCase : Dict = model.classifier.bias
return tf_to_pt_map
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
_lowerCAmelCase : int = tf.train.list_variables(_lowerCamelCase )
_lowerCAmelCase : Dict = {}
for name, shape in init_vars:
logger.info(f"""Loading TF weight {name} with shape {shape}""" )
_lowerCAmelCase : Any = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = array
# Build TF to PyTorch weights loading map
_lowerCAmelCase : str = _build_tf_to_pytorch_map(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(f"""Importing {name}""" )
if name not in tf_weights:
logger.info(f"""{name} not in tf pre-trained weights, skipping""" )
continue
_lowerCAmelCase : Dict = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
_lowerCAmelCase : str = np.transpose(_lowerCamelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
_lowerCAmelCase : Dict = array.squeeze().transpose()
else:
_lowerCAmelCase : List[str] = np.transpose(_lowerCamelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(f"""Initialize PyTorch weight {name} {array.shape}""" )
_lowerCAmelCase : Any = torch.from_numpy(_lowerCamelCase )
tf_weights.pop(_lowerCamelCase , _lowerCamelCase )
tf_weights.pop(name + '/RMSProp' , _lowerCamelCase )
tf_weights.pop(name + '/RMSProp_1' , _lowerCamelCase )
tf_weights.pop(name + '/ExponentialMovingAverage' , _lowerCamelCase )
logger.info(f"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = features.shape[-2:]
_lowerCAmelCase : List[Any] = conv_layer.stride
_lowerCAmelCase : List[Any] = conv_layer.kernel_size
if in_height % stride_height == 0:
_lowerCAmelCase : Dict = max(kernel_height - stride_height , 0 )
else:
_lowerCAmelCase : Dict = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
_lowerCAmelCase : List[str] = max(kernel_width - stride_width , 0 )
else:
_lowerCAmelCase : Optional[Any] = max(kernel_width - (in_width % stride_width) , 0 )
_lowerCAmelCase : Union[str, Any] = pad_along_width // 2
_lowerCAmelCase : str = pad_along_width - pad_left
_lowerCAmelCase : List[str] = pad_along_height // 2
_lowerCAmelCase : Optional[int] = pad_along_height - pad_top
_lowerCAmelCase : int = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_lowerCamelCase , _lowerCamelCase , 'constant' , 0.0 )
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A = 1 ,_A = 1 ,_A = False ,_A = True ,_A = True ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = config
if in_channels % groups != 0:
raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
_lowerCAmelCase : Union[str, Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
_lowerCAmelCase : Optional[Any] = nn.Convad(
in_channels=_A ,out_channels=_A ,kernel_size=_A ,stride=_A ,padding=_A ,groups=_A ,bias=_A ,padding_mode='zeros' ,)
if use_normalization:
_lowerCAmelCase : Union[str, Any] = nn.BatchNormad(
num_features=_A ,eps=config.layer_norm_eps ,momentum=0.9_9_9_7 ,affine=_A ,track_running_stats=_A ,)
else:
_lowerCAmelCase : Dict = None
if use_activation:
if isinstance(_A ,_A ):
_lowerCAmelCase : Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act ,_A ):
_lowerCAmelCase : List[Any] = ACTaFN[config.hidden_act]
else:
_lowerCAmelCase : Dict = config.hidden_act
else:
_lowerCAmelCase : Any = None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.config.tf_padding:
_lowerCAmelCase : Any = apply_tf_padding(_A ,self.convolution )
_lowerCAmelCase : int = self.convolution(_A )
if self.normalization is not None:
_lowerCAmelCase : Tuple = self.normalization(_A )
if self.activation is not None:
_lowerCAmelCase : Optional[int] = self.activation(_A )
return features
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = MobileNetVaConfig
_UpperCAmelCase = load_tf_weights_in_mobilenet_va
_UpperCAmelCase = "mobilenet_v1"
_UpperCAmelCase = "pixel_values"
_UpperCAmelCase = False
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if isinstance(_A ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_A ,nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_lowerCAmelCase = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCAmelCase = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A = True ):
'''simple docstring'''
super().__init__(_A )
_lowerCAmelCase : Optional[Any] = config
_lowerCAmelCase : Any = 32
_lowerCAmelCase : List[Any] = max(int(depth * config.depth_multiplier ) ,config.min_depth )
_lowerCAmelCase : Any = MobileNetVaConvLayer(
_A ,in_channels=config.num_channels ,out_channels=_A ,kernel_size=3 ,stride=2 ,)
_lowerCAmelCase : List[str] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_lowerCAmelCase : Tuple = nn.ModuleList()
for i in range(13 ):
_lowerCAmelCase : Any = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_lowerCAmelCase : str = max(int(depth * config.depth_multiplier ) ,config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_A ,in_channels=_A ,out_channels=_A ,kernel_size=3 ,stride=strides[i] ,groups=_A ,) )
self.layer.append(
MobileNetVaConvLayer(
_A ,in_channels=_A ,out_channels=_A ,kernel_size=1 ,) )
_lowerCAmelCase : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_A ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def __lowerCamelCase ( self ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCAmelCase : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
_lowerCAmelCase : Any = self.conv_stem(_A )
_lowerCAmelCase : Optional[int] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_lowerCAmelCase : Tuple = layer_module(_A )
if output_hidden_states:
_lowerCAmelCase : int = all_hidden_states + (hidden_states,)
_lowerCAmelCase : Dict = hidden_states
if self.pooler is not None:
_lowerCAmelCase : Union[str, Any] = torch.flatten(self.pooler(_A ) ,start_dim=1 )
else:
_lowerCAmelCase : Tuple = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A ,pooler_output=_A ,hidden_states=_A ,)
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ):
'''simple docstring'''
super().__init__(_A )
_lowerCAmelCase : Dict = config.num_labels
_lowerCAmelCase : List[str] = MobileNetVaModel(_A )
_lowerCAmelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_lowerCAmelCase : Dict = nn.Dropout(config.classifier_dropout_prob ,inplace=_A )
_lowerCAmelCase : List[str] = nn.Linear(_A ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def __lowerCamelCase ( self ,_A = None ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCAmelCase : Any = self.mobilenet_va(_A ,output_hidden_states=_A ,return_dict=_A )
_lowerCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_lowerCAmelCase : Optional[int] = self.classifier(self.dropout(_A ) )
_lowerCAmelCase : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_lowerCAmelCase : int = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_lowerCAmelCase : List[Any] = 'single_label_classification'
else:
_lowerCAmelCase : Tuple = 'multi_label_classification'
if self.config.problem_type == "regression":
_lowerCAmelCase : Optional[Any] = MSELoss()
if self.num_labels == 1:
_lowerCAmelCase : Optional[int] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
_lowerCAmelCase : Union[str, Any] = loss_fct(_A ,_A )
elif self.config.problem_type == "single_label_classification":
_lowerCAmelCase : Union[str, Any] = CrossEntropyLoss()
_lowerCAmelCase : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_lowerCAmelCase : Any = BCEWithLogitsLoss()
_lowerCAmelCase : int = loss_fct(_A ,_A )
if not return_dict:
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_A ,logits=_A ,hidden_states=outputs.hidden_states ,)
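# A minimal usage sketch for the classification head above, assuming the published
# `google/mobilenet_v1_1.0_224` checkpoint; the checkpoint name and image path are
# illustrative examples, and running this downloads the weights.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
    _processor = AutoImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' )
    _model = MobileNetV1ForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' )
    _inputs = _processor(images=Image.open('cat.png' ) ,return_tensors='pt' )
    with torch.no_grad():
        _logits = _model(**_inputs ).logits
    print(_model.config.id2label[_logits.argmax(-1 ).item()] )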
| 700 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used during training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def __lowerCamelCase ( xa ,xb ,alpha ):
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(xa ) ,torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
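# A small standalone check of the spherical interpolation (slerp) implemented above:
# slerping two unit-norm tensors keeps the result on the unit sphere, which is why it is
# preferred over linear interpolation for Gaussian latents. Shapes and values are illustrative.
import torch as _torch_demo
from math import acos as _acos, sin as _sin

def _slerp_demo(alpha, xa, xb):
    theta = _acos(_torch_demo.dot(_torch_demo.flatten(xa), _torch_demo.flatten(xb)) / _torch_demo.norm(xa) / _torch_demo.norm(xb))
    return _sin((1 - alpha) * theta) * xa / _sin(theta) + _sin(alpha * theta) * xb / _sin(theta)

_xa = _torch_demo.nn.functional.normalize(_torch_demo.randn(8), dim=0)
_xb = _torch_demo.nn.functional.normalize(_torch_demo.randn(8), dim=0)
print(round(_torch_demo.norm(_slerp_demo(0.5, _xa, _xb)).item(), 4))  # ~1.0 for unit-norm inputs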
| 16 | 0 |
"""simple docstring"""
def perfect( number ):
    '''simple docstring'''
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
    print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
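# Quick sanity check for the predicate above: the first perfect numbers are 6, 28 and
# 496 (each equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3).
def first_perfect_numbers( limit ):
    """Return every perfect number strictly below ``limit``."""
    return [n for n in range(1 , limit ) if perfect(n )]
# first_perfect_numbers(500) -> [6, 28, 496]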
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
        index = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
index += 1
return (vocab_file, merges_file)
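# A stripped-down illustration of the merge loop implemented in ``bpe`` above: repeatedly
# fuse the adjacent symbol pair with the lowest merge rank until no ranked pair remains.
# The toy merge table and word are invented for this example, not part of any real vocabulary.
def _bpe_demo(word, ranks):
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda pair: ranks.get(pair, float('inf')))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

print(_bpe_demo('lower', {('l', 'o'): 0, ('lo', 'w'): 1, ('e', 'r'): 2}))  # ['low', 'er']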
| 16 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCamelCase :
@staticmethod
def __lowerCamelCase ( *_A ,**_A ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
_UpperCAmelCase = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ObjectDetectionPipeline(model=_A ,image_processor=_A )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' ,threshold=0.0 )
self.assertGreater(len(_A ) ,0 )
for detected_object in outputs:
self.assertEqual(
_A ,{
'score': ANY(_A ),
'label': ANY(_A ),
'box': {'xmin': ANY(_A ), 'ymin': ANY(_A ), 'xmax': ANY(_A ), 'ymax': ANY(_A )},
} ,)
import datasets
_lowerCAmelCase : List[Any] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' ,'image' ,split='test' )
_lowerCAmelCase : Optional[int] = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
_lowerCAmelCase : Dict = object_detector(_A ,threshold=0.0 )
self.assertEqual(len(_A ) ,len(_A ) )
for outputs in batch_outputs:
self.assertGreater(len(_A ) ,0 )
for detected_object in outputs:
self.assertEqual(
_A ,{
'score': ANY(_A ),
'label': ANY(_A ),
'box': {'xmin': ANY(_A ), 'ymin': ANY(_A ), 'xmax': ANY(_A ), 'ymax': ANY(_A )},
} ,)
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@require_torch
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = 'hf-internal-testing/tiny-detr-mobilenetsv3'
_lowerCAmelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(_A )
_lowerCAmelCase : int = AutoFeatureExtractor.from_pretrained(_A )
_lowerCAmelCase : Tuple = ObjectDetectionPipeline(model=_A ,feature_extractor=_A )
_lowerCAmelCase : str = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=0.0 )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] ,)
_lowerCAmelCase : Optional[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] ,threshold=0.0 ,)
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] ,)
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 'facebook/detr-resnet-50'
_lowerCAmelCase : Optional[int] = AutoModelForObjectDetection.from_pretrained(_A )
_lowerCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained(_A )
_lowerCAmelCase : List[str] = ObjectDetectionPipeline(model=_A ,feature_extractor=_A )
_lowerCAmelCase : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
_lowerCAmelCase : Optional[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] ,)
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'facebook/detr-resnet-50'
_lowerCAmelCase : Dict = pipeline('object-detection' ,model=_A )
_lowerCAmelCase : Optional[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
_lowerCAmelCase : Dict = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] ,)
@require_torch
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0.9_9_8_5
_lowerCAmelCase : str = 'facebook/detr-resnet-50'
_lowerCAmelCase : Optional[int] = pipeline('object-detection' ,model=_A )
_lowerCAmelCase : Optional[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=_A )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] ,)
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = 'Narsil/layoutlmv3-finetuned-funsd'
_lowerCAmelCase : Tuple = 0.9_9_9_3
_lowerCAmelCase : str = pipeline('object-detection' ,model=_A ,threshold=_A )
_lowerCAmelCase : Any = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] ,)
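# A minimal end-to-end sketch of the pipeline exercised by these tests, assuming the
# public `facebook/detr-resnet-50` checkpoint and the COCO sample image already used
# above; running this downloads the weights.
if __name__ == "__main__":
    _detector = pipeline('object-detection' ,model='facebook/detr-resnet-50' )
    for _detection in _detector('http://images.cocodataset.org/val2017/000000039769.jpg' ):
        print(_detection['label'] ,round(_detection['score'] ,3 ) ,_detection['box'] )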
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
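        # Gated-GELU feed-forward (as in T5 v1.1): a GELU-activated projection is
        # multiplied elementwise with a parallel linear projection before the output layer.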
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
        _lowerCAmelCase : Optional[int] = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
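# A standalone sketch of the FiLM conditioning used throughout this decoder: a
# conditioning embedding is projected to per-channel (scale, shift) parameters that
# modulate the hidden states, exactly the ``x * (1 + scale) + shift`` rule above.
# Dimensions are illustrative.
import torch as _torch_film
from torch import nn as _nn_film

class _FiLMDemo(_nn_film.Module):
    def __init__(self, cond_features, channels):
        super().__init__()
        # a single linear layer emits both the scale and the shift
        self.scale_bias = _nn_film.Linear(cond_features, channels * 2, bias=False)

    def forward(self, x, conditioning_emb):
        scale, shift = _torch_film.chunk(self.scale_bias(conditioning_emb), 2, dim=-1)
        return x * (1 + scale) + shift

_film = _FiLMDemo(cond_features=16, channels=8)
_x = _torch_film.randn(2, 10, 8)      # (batch, sequence, channels)
_cond = _torch_film.randn(2, 1, 16)   # one conditioning vector per sequence
print(_film(_x, _cond).shape)         # torch.Size([2, 10, 8])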
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance , reactance , impedance ):
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
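# Worked example for the function above: with R = 3 ohm and X = 4 ohm, the impedance
# magnitude is sqrt(3**2 + 4**2) = 5 ohm; the argument passed as 0 selects the quantity
# to solve for.
# >>> electrical_impedance(3, 4, 0)
# {'impedance': 5.0}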
| 703 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
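# A minimal inference sketch mirroring the slow integration test above, assuming
# `microsoft/resnet-50` is the checkpoint behind TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0];
# running this downloads the weights.
if __name__ == "__main__":
    _processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50' )
    _model = TFResNetForImageClassification.from_pretrained('microsoft/resnet-50' )
    _inputs = _processor(images=prepare_img() ,return_tensors='tf' )
    _logits = _model(**_inputs ).logits
    print(_model.config.id2label[int(tf.math.argmax(_logits ,axis=-1 )[0] )] )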
| 16 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
_UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __UpperCamelCase :
_UpperCAmelCase = field(default=a__ , metadata={"help": "The input training data file (a text file)."} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.train_file is not None:
_lowerCAmelCase : Any = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_lowerCAmelCase : Optional[int] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __UpperCamelCase :
_UpperCAmelCase = 42
_UpperCAmelCase = True
_UpperCAmelCase = None
_UpperCAmelCase = None
def __call__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = 'label' if 'label' in features[0].keys() else 'labels'
_lowerCAmelCase : Dict = [feature.pop(_A ) for feature in features]
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Any = len(features[0]['input_ids'] )
_lowerCAmelCase : Any = [
[{k: v[i] for k, v in feature.items()} for i in range(_A )] for feature in features
]
_lowerCAmelCase : Any = list(chain(*_A ) )
_lowerCAmelCase : List[str] = self.tokenizer.pad(
_A ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
# Un-flatten
_lowerCAmelCase : Union[str, Any] = {k: v.view(_A ,_A ,-1 ) for k, v in batch.items()}
# Add back labels
        _lowerCAmelCase : Dict = torch.tensor(_A ,dtype=torch.int64 )
return batch
def main( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase : List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
datasets.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_lowerCAmelCase : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_lowerCAmelCase : Optional[Any] = {}
if data_args.train_file is not None:
_lowerCAmelCase : Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
_lowerCAmelCase : Any = data_args.validation_file
_lowerCAmelCase : List[Any] = data_args.train_file.split('.' )[-1]
_lowerCAmelCase : str = load_dataset(
_lowerCamelCase , data_files=_lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_lowerCAmelCase : List[str] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Tuple = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_lowerCAmelCase : Dict = [f"""ending{i}""" for i in range(4 )]
_lowerCAmelCase : Tuple = 'sent1'
_lowerCAmelCase : Any = 'sent2'
if data_args.max_seq_length is None:
_lowerCAmelCase : Optional[int] = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
_lowerCAmelCase : Any = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
_lowerCAmelCase : List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
_lowerCAmelCase : str = examples[question_header_name]
_lowerCAmelCase : Dict = [
[f"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowerCamelCase )
]
# Flatten out
_lowerCAmelCase : Tuple = list(chain(*_lowerCamelCase ) )
_lowerCAmelCase : Union[str, Any] = list(chain(*_lowerCamelCase ) )
# Tokenize
_lowerCAmelCase : List[str] = tokenizer(
_lowerCamelCase , _lowerCamelCase , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_lowerCAmelCase : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
_lowerCAmelCase : Any = min(len(_lowerCamelCase ) , data_args.max_train_samples )
_lowerCAmelCase : Tuple = train_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_lowerCAmelCase : Any = train_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_lowerCAmelCase : Any = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_lowerCAmelCase : str = min(len(_lowerCamelCase ) , data_args.max_eval_samples )
_lowerCAmelCase : str = eval_dataset.select(range(_lowerCamelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_lowerCAmelCase : int = eval_dataset.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_lowerCAmelCase : int = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=_lowerCamelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(_lowerCamelCase ):
_lowerCAmelCase : Dict = eval_predictions
_lowerCAmelCase : Optional[Any] = np.argmax(_lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_lowerCAmelCase : Optional[int] = Trainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
_lowerCAmelCase : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
_lowerCAmelCase : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowerCAmelCase : Dict = last_checkpoint
_lowerCAmelCase : int = trainer.train(resume_from_checkpoint=_lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_lowerCAmelCase : int = train_result.metrics
_lowerCAmelCase : str = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCamelCase )
)
_lowerCAmelCase : Union[str, Any] = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics('train' , _lowerCamelCase )
trainer.save_metrics('train' , _lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCAmelCase : Optional[Any] = trainer.evaluate()
_lowerCAmelCase : Union[str, Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCamelCase )
_lowerCAmelCase : Dict = min(_lowerCamelCase , len(_lowerCamelCase ) )
trainer.log_metrics('eval' , _lowerCamelCase )
trainer.save_metrics('eval' , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCamelCase )
else:
trainer.create_model_card(**_lowerCamelCase )
def _mp_fn( _lowerCamelCase ):
    '''simple docstring'''
main()
if __name__ == "__main__":
main()
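# A standalone sketch of the flatten/un-flatten trick the script above relies on for
# multiple choice: the 4 candidate endings per example are flattened so the tokenizer
# sees plain text pairs, then the encodings are regrouped to (batch, num_choices, seq_len).
# The tensor contents are dummies; only the reshaping is the point.
import torch as _torch_mc

_batch_size, _num_choices, _seq_len = 2, 4, 6
_flat_input_ids = _torch_mc.arange(_batch_size * _num_choices * _seq_len).view(-1, _seq_len)
print(_flat_input_ids.shape)                                    # torch.Size([8, 6])
_grouped = _flat_input_ids.view(_batch_size, _num_choices, -1)  # one row of 4 choices per example
print(_grouped.shape)                                           # torch.Size([2, 4, 6])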
| 704 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve( matrix , vector ):
    '''simple docstring'''
    size : int = len(matrix )
    augmented : Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row : int
    rowa : int
    col : int
    cola : int
    pivot_row : int
    ratio : float
    # copy the coefficient matrix and the vector into one augmented matrix
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
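# Illustrative check (not part of the original file), assuming the polynomial above is
# bound to `question_function` as the default argument below expects:
# u(1) = 1 - 1 + 1 - ... + 1 = 1 and u(2) = 1 - 2 + 4 - ... + 1024 = 683, so
#
#   assert question_function(1) == 1
#   assert question_function(2) == 683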
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(x_val ) == poly(x_val ):
x_val += 1
ret += poly(x_val )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
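# Illustrative sketch (not part of the original file): `solve` above performs Gaussian
# elimination with partial pivoting on an augmented matrix. A compact standalone
# equivalent for a nonsingular square system (the helper name `tiny_solve` is
# hypothetical):
def tiny_solve(matrix, vector):
    size = len(matrix)
    # Build the augmented matrix [A | b].
    aug = [list(matrix[r]) + [vector[r][0]] for r in range(size)]
    for col in range(size):
        # Partial pivoting: bring the row with the largest entry in this column up.
        pivot = max(range(col, size), key=lambda r: abs(aug[r][col]))
        aug[col], aug[pivot] = aug[pivot], aug[col]
        # Eliminate this column from every other row (Gauss-Jordan style).
        for r in range(size):
            if r != col:
                ratio = aug[r][col] / aug[col][col]
                for c in range(col, size + 1):
                    aug[r][c] -= aug[col][c] * ratio
    return [[round(aug[r][size] / aug[r][r], 10)] for r in range(size)]

# 2x + y = 5 and x + 3y = 10 have the unique solution x = 1, y = 3.
assert tiny_solve([[2.0, 1.0], [1.0, 3.0]], [[5.0], [10.0]]) == [[1.0], [3.0]]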
| 16 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=512 ,sample_rate=1_6000 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=_A ,use_timestep_embedding=_A ,time_embedding_type='fourier' ,mid_block_type='UNetMidBlock1D' ,down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') ,up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') ,)
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components['unet'].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
_lowerCAmelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(generator=_A ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
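# Illustrative note (not part of the original tests): DanceDiffusion emits raw stereo
# waveforms, so `output.audios` has shape (batch, channels=2, sample_size); the slice
# assertions above compare the trailing 3x3 corner of the waveform against reference
# values within an absolute tolerance of 1e-2.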
| 705 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
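# Illustrative check (not part of the original file), assuming the predicate above is
# bound to `_is_chinese_char` as its call sites below use: '中' (U+4E2D) lies in the
# CJK Unified Ideographs block, while 'A' (U+0041) does not.
#
#   assert _is_chinese_char(ord('中'))
#   assert not _is_chinese_char(ord('A'))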
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
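# Illustrative sketch (not part of the original file): the function above prefixes every
# non-initial character of a matched Chinese word with '##', mirroring WordPiece
# sub-tokens so whole-word boundaries survive for whole-word masking. A minimal
# standalone analogue (the name `tag_whole_words` is hypothetical; the `is_chinese`
# guard and length bookkeeping of the original are simplified):
def tag_whole_words(tokens, words):
    out, i = list(tokens), 0
    max_len = max((len(w) for w in words), default=0)
    while i < len(out):
        # Try the longest candidate word first, down to length 2.
        for n in range(min(len(out) - i, max_len), 1, -1):
            if ''.join(t.lstrip('#') for t in out[i : i + n]) in words:
                for j in range(i + 1, i + n):
                    out[j] = '##' + out[j]
                i += n - 1
                break
        i += 1
    return out

assert tag_whole_words(['中', '国', '人'], {'中国'}) == ['中', '##国', '人']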
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
| 16 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = UniSpeechSatForSequenceClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : Any = downstream_dict['projector.weight']
_lowerCAmelCase : Any = downstream_dict['projector.bias']
_lowerCAmelCase : List[Any] = downstream_dict['model.post_net.linear.weight']
_lowerCAmelCase : List[Any] = downstream_dict['model.post_net.linear.bias']
return model
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = downstream_dict['model.linear.weight']
_lowerCAmelCase : int = downstream_dict['model.linear.bias']
return model
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = UniSpeechSatForXVector.from_pretrained(_lowerCamelCase , config=_lowerCamelCase )
_lowerCAmelCase : Tuple = downstream_dict['connector.weight']
_lowerCAmelCase : List[Any] = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_lowerCAmelCase : Optional[int] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
_lowerCAmelCase : str = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
_lowerCAmelCase : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
_lowerCAmelCase : Optional[int] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
_lowerCAmelCase : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
_lowerCAmelCase : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
_lowerCAmelCase : str = downstream_dict['objective.W']
return model
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = torch.load(_lowerCamelCase , map_location='cpu' )
_lowerCAmelCase : Dict = checkpoint['Downstream']
_lowerCAmelCase : Union[str, Any] = UniSpeechSatConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
_lowerCamelCase , return_attention_mask=_lowerCamelCase , do_normalize=_lowerCamelCase )
_lowerCAmelCase : List[str] = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
_lowerCAmelCase : Dict = convert_classification(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif arch.endswith('ForAudioFrameClassification' ):
_lowerCAmelCase : int = convert_diarization(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif arch.endswith('ForXVector' ):
_lowerCAmelCase : Any = convert_xvector(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
_lowerCAmelCase : int = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
_lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
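# Illustrative invocation (not part of the original script); the script filename, model
# name and all paths are hypothetical:
#
#   python convert_unispeech_sat_s3prl.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model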
| 706 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
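# Illustrative sketch (not part of the original tests): the fixtures above draw latents
# through numpy's RandomState rather than torch's generator so the reference values are
# reproducible across devices; a self-contained equivalent:
import numpy as np
import torch

latents = torch.from_numpy(np.random.RandomState(0).standard_normal((1, 4, 32, 32)))
assert latents.shape == (1, 4, 32, 32) and latents.dtype == torch.float64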
| 16 | 0 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self ,_A ,_A=13 ,_A=7 ,_A=True ,_A=True ,_A=True ,_A=True ,_A=99 ,_A=24 ,_A=2 ,_A=6 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=3 ,_A=None ,_A=1000 ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Dict = seq_length
_lowerCAmelCase : Tuple = is_training
_lowerCAmelCase : Any = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : List[str] = type_sequence_label_size
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Optional[Any] = num_labels
_lowerCAmelCase : List[str] = scope
_lowerCAmelCase : Optional[int] = range_bbox
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Union[str, Any] = bbox[i, j, 3]
_lowerCAmelCase : str = bbox[i, j, 1]
_lowerCAmelCase : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : str = bbox[i, j, 2]
_lowerCAmelCase : Optional[Any] = bbox[i, j, 0]
_lowerCAmelCase : List[str] = t
_lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
_lowerCAmelCase : Dict = None
if self.use_token_type_ids:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
_lowerCAmelCase : int = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : str = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : Any = LiltModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[str] = model(_A ,bbox=_A ,attention_mask=_A ,token_type_ids=_A )
_lowerCAmelCase : Tuple = model(_A ,bbox=_A ,token_type_ids=_A )
_lowerCAmelCase : Optional[int] = model(_A ,bbox=_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.num_labels
_lowerCAmelCase : List[str] = LiltForTokenClassification(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(
_A ,bbox=_A ,attention_mask=_A ,token_type_ids=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : int = LiltForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(
_A ,bbox=_A ,attention_mask=_A ,token_type_ids=_A ,start_positions=_A ,end_positions=_A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
(
    _lowerCAmelCase,
    _lowerCAmelCase,
    _lowerCAmelCase,
    _lowerCAmelCase,
    _lowerCAmelCase,
    _lowerCAmelCase,
    _lowerCAmelCase,
) : List[Any] = config_and_inputs
_lowerCAmelCase : Tuple = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
return True
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = LiltModelTester(self )
_lowerCAmelCase : str = ConfigTester(self ,config_class=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = LiltModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(_A )
_lowerCAmelCase : Dict = torch.tensor([[1, 2]] ,device=_A )
_lowerCAmelCase : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=_A )
# forward pass
with torch.no_grad():
_lowerCAmelCase : int = model(input_ids=_A ,bbox=_A )
_lowerCAmelCase : Tuple = torch.Size([1, 2, 768] )
_lowerCAmelCase : List[str] = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] ,device=_A ,)
self.assertTrue(outputs.last_hidden_state.shape ,_A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,_A ,atol=1E-3 ) )
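# Illustrative note (not part of the original tests): the bbox sanitising loop in the
# model tester enforces x0 <= x1 and y0 <= y1 per box, e.g. a random box [5, 9, 2, 4]
# is rewritten to [2, 4, 5, 9] before being fed to the model.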
| 707 |
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaaencode(string.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
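# Illustrative round-trip (not part of the original file), using the standard library
# directly:
import base64

encoded = base64.b85encode('Hello World!'.encode('utf-8'))
assert base64.b85decode(encoded).decode('utf-8') == 'Hello World!'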
| 16 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0000.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
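# Illustrative sketch (not part of the original file): `post_process_latents` undoes the
# normalisation applied to CLIP image embeddings, i.e. latents * std + mean; with
# hypothetical statistics:
import torch

clip_mean, clip_std = torch.tensor([0.5]), torch.tensor([2.0])
normalized = torch.tensor([[1.0, -1.0]])
assert torch.allclose(normalized * clip_std + clip_mean, torch.tensor([[2.5, -1.5]]))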
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""bert-base-uncased""": 5_1_2,
"""bert-large-uncased""": 5_1_2,
"""bert-base-cased""": 5_1_2,
"""bert-large-cased""": 5_1_2,
"""bert-base-multilingual-uncased""": 5_1_2,
"""bert-base-multilingual-cased""": 5_1_2,
"""bert-base-chinese""": 5_1_2,
"""bert-base-german-cased""": 5_1_2,
"""bert-large-uncased-whole-word-masking""": 5_1_2,
"""bert-large-cased-whole-word-masking""": 5_1_2,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 5_1_2,
"""bert-base-cased-finetuned-mrpc""": 5_1_2,
"""bert-base-german-dbmdz-cased""": 5_1_2,
"""bert-base-german-dbmdz-uncased""": 5_1_2,
"""TurkuNLP/bert-base-finnish-cased-v1""": 5_1_2,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 5_1_2,
"""wietsedv/bert-base-dutch-cased""": 5_1_2,
}
_lowerCAmelCase = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = BertTokenizer
def __init__( self ,_A=None ,_A=None ,_A=True ,_A="[UNK]" ,_A="[SEP]" ,_A="[PAD]" ,_A="[CLS]" ,_A="[MASK]" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
_A ,tokenizer_file=_A ,do_lower_case=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,tokenize_chinese_chars=_A ,strip_accents=_A ,**_A ,)
_lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_A ) != do_lower_case
or normalizer_state.get('strip_accents' ,_A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_A ) != tokenize_chinese_chars
):
_lowerCAmelCase : Dict = getattr(_A ,normalizer_state.pop('type' ) )
_lowerCAmelCase : Dict = do_lower_case
_lowerCAmelCase : Optional[int] = strip_accents
_lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
_lowerCAmelCase : Dict = normalizer_class(**_A )
_lowerCAmelCase : Union[str, Any] = do_lower_case
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = self._tokenizer.model.save(_A ,name=_A )
return tuple(_A )
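# Illustrative sketch (not part of the original file): for a sequence pair the method
# above assigns type 0 to `[CLS] A [SEP]` and type 1 to `B [SEP]`. With the standard
# BERT special-token ids [CLS]=101 and [SEP]=102 and hypothetical token ids:
token_ids_a, token_ids_b = [5, 6], [7]
cls_ids, sep_ids = [101], [102]
token_type_ids = len(cls_ids + token_ids_a + sep_ids) * [0] + len(token_ids_b + sep_ids) * [1]
assert token_type_ids == [0, 0, 0, 0, 1, 1]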
| 16 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self ,_A ,_A=13 ,_A=30 ,_A=2 ,_A=3 ,_A=True ,_A=True ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=10 ,_A=0.0_2 ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : int = is_training
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : Optional[int] = num_attention_heads
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : str = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Union[str, Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : List[Any] = num_patches + 1
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[Any] = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_A ,initializer_range=self.initializer_range ,)
return config, pixel_values
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = FlaxViTModel(config=_A )
_lowerCAmelCase : str = model(_A )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : str = (self.image_size, self.image_size)
_lowerCAmelCase : Optional[Any] = (self.patch_size, self.patch_size)
_lowerCAmelCase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = self.type_sequence_label_size
_lowerCAmelCase : Optional[Any] = FlaxViTForImageClassification(config=_A )
_lowerCAmelCase : str = model(_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : str = 1
_lowerCAmelCase : Tuple = FlaxViTForImageClassification(_A )
_lowerCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : List[Any] = model(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
    _lowerCAmelCase,
    _lowerCAmelCase,
) : Optional[Any] = config_and_inputs
_lowerCAmelCase : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = FlaxViTModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self ,config_class=_A ,has_text_modality=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(_A )
_lowerCAmelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : List[str] = self._prepare_for_class(_A ,_A )
_lowerCAmelCase : Optional[Any] = model_class(_A )
@jax.jit
def model_jitted(_A ,**_A ):
return model(pixel_values=_A ,**_A )
with self.subTest('JIT Enabled' ):
_lowerCAmelCase : Any = model_jitted(**_A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCAmelCase : Dict = model_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) ,len(_A ) )
for jitted_output, output in zip(_A ,_A ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_lowerCAmelCase : Any = model_class_name.from_pretrained('google/vit-base-patch16-224' )
_lowerCAmelCase : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_A )
| 709 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
    @require_multi_gpu
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
    @require_multi_gpu
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(F"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
    @require_multi_gpu
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
    @require_multi_gpu
    def __lowerCamelCase ( self ):
        '''simple docstring'''
        print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
            execute_subprocess_async(cmd ,env=os.environ.copy() )
if __name__ == "__main__":
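    # When launched under torchrun, this block exercises `pad_across_processes`:
    # each rank builds a tensor whose first dim depends on its process index, and
    # after padding every rank should hold a tensor of the maximum size, filled
    # with zeros at the end (or at the front when pad_first=True).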
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""
    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 16 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_lowerCAmelCase = get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with FSDP.state_dict_type(
_lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_lowerCAmelCase : Any = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCAmelCase : str = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCAmelCase : Optional[int] = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
logger.info(f"""Saving model to {ckpt_dir}""" )
_lowerCAmelCase : List[Any] = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=_lowerCamelCase , storage_writer=dist_cp.FileSystemWriter(_lowerCamelCase ) , planner=DefaultSavePlanner() , )
logger.info(f"""Model saved to {ckpt_dir}""" )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_lowerCamelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
_lowerCAmelCase : Optional[Any] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Loading model from {input_model_file}""" )
_lowerCAmelCase : List[Any] = torch.load(_lowerCamelCase )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
_lowerCAmelCase : Any = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Loading model from {input_model_file}""" )
_lowerCAmelCase : Tuple = torch.load(_lowerCamelCase )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
_lowerCAmelCase : Optional[Any] = (
os.path.join(_lowerCamelCase , f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
_lowerCAmelCase : Any = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=_lowerCamelCase , storage_reader=dist_cp.FileSystemReader(_lowerCamelCase ) , planner=DefaultLoadPlanner() , )
_lowerCAmelCase : List[str] = state_dict['model']
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with FSDP.state_dict_type(
_lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
_lowerCAmelCase : int = FSDP.optim_state_dict(_lowerCamelCase , _lowerCamelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
_lowerCAmelCase : Any = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
_lowerCAmelCase : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(_lowerCamelCase ) , planner=DefaultSavePlanner() , )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_lowerCamelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
_lowerCAmelCase : Optional[int] = None
            # The check below should work, but currently it does not (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage.
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
_lowerCAmelCase : int = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , _lowerCamelCase )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
_lowerCAmelCase : Tuple = torch.load(_lowerCamelCase )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
_lowerCAmelCase : str = (
os.path.join(_lowerCamelCase , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
_lowerCAmelCase : List[Any] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(_lowerCamelCase ) , )
_lowerCAmelCase : int = optim_state['optimizer']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
_lowerCAmelCase : str = FSDP.optim_state_dict_to_load(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
optimizer.load_state_dict(_lowerCamelCase )
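
# Rough usage sketch (an assumption, not part of this module): upstream
# `accelerate` exposes these four helpers as `save_fsdp_model`,
# `load_fsdp_model`, `save_fsdp_optimizer` and `load_fsdp_optimizer`, wired
# roughly like:
#
#     fsdp_plugin = accelerator.state.fsdp_plugin
#     save_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)
#     # ... later, to resume training:
#     load_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)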
| 710 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence ):
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree(sequence , current_sequence , index , index_used ):
    '''simple docstring'''
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
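# Backtracking walk-through: each recursion level fixes one position by picking
# any unused element, recurses, then undoes the choice (pop + flag reset), so
# all n! orderings are printed; the length-4 input above yields 4! = 24 lines.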
| 16 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=7 ,_A=True ,_A=True ,_A=False ,_A=True ,_A=99 ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=16 ,_A=2 ,_A=0.0_2 ,_A=3 ,_A=4 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : str = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Any = seq_length
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : List[str] = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : Dict = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : Any = num_choices
_lowerCAmelCase : Dict = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_lowerCAmelCase : List[Any] = None
if self.use_input_mask:
_lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[Any] = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] ,self.num_choices )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=_A,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=_A,
        )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = FalconModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = model(_A ,attention_mask=_A )
_lowerCAmelCase : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : str = FalconModel(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)
_lowerCAmelCase : Union[str, Any] = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,)
_lowerCAmelCase : Optional[Any] = model(_A ,attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = FalconForCausalLM(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Tuple = model(_A ,attention_mask=_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : int = True
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : int = FalconForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
_lowerCAmelCase : Optional[int] = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,use_cache=_A ,)
_lowerCAmelCase : int = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
_lowerCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
_lowerCAmelCase : Dict = torch.cat([input_ids, next_tokens] ,dim=-1 )
_lowerCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] ,dim=-1 )
_lowerCAmelCase : str = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,output_hidden_states=_A ,)['hidden_states'][0]
_lowerCAmelCase : Optional[Any] = model(
_A ,attention_mask=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,past_key_values=_A ,output_hidden_states=_A ,)['hidden_states'][0]
# select random slice
_lowerCAmelCase : List[Any] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
_lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
_lowerCAmelCase : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A ,_A ,atol=1E-3 ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase = (FalconForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = FalconModelTester(self )
_lowerCAmelCase : int = ConfigTester(self ,config_class=_A ,hidden_size=37 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_lowerCAmelCase : Optional[int] = alibi
self.model_tester.create_and_check_model(_A ,*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[Any] = input_dict['input_ids']
_lowerCAmelCase : Union[str, Any] = input_ids.ne(1 ).to(_A )
_lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : List[Any] = model(_A ,attention_mask=_A ,labels=_A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = 3
_lowerCAmelCase : Dict = 'single_label_classification'
_lowerCAmelCase : Union[str, Any] = input_dict['input_ids']
_lowerCAmelCase : Optional[Any] = input_ids.ne(1 ).to(_A )
_lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(_A ,attention_mask=_A ,labels=_A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[str] = input_dict['input_ids']
_lowerCAmelCase : int = FalconForCausalLM(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(_A ,use_cache=_A )
_lowerCAmelCase : str = input_ids.shape[0]
_lowerCAmelCase : Optional[Any] = model._convert_to_rw_cache(result.past_key_values )
_lowerCAmelCase : int = model._convert_cache_to_standard_format(_A ,_A )
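        # Falcon's legacy "RW" cache fuses the batch and head dimensions into a
        # single leading dimension (3-D tensors), while the standard layout keeps
        # them separate (4-D); the loop below checks that the conversion
        # round-trips losslessly.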
for layer in range(len(_A ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Union[str, Any] = 3
_lowerCAmelCase : Optional[Any] = 'multi_label_classification'
_lowerCAmelCase : List[str] = input_dict['input_ids']
_lowerCAmelCase : Optional[Any] = input_ids.ne(1 ).to(_A )
_lowerCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
_lowerCAmelCase : Tuple = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(_A ,attention_mask=_A ,labels=_A )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_class in self.all_generative_model_classes:
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_A ,'use_cache' ):
return
_lowerCAmelCase : Dict = model_class(_A ).to(_A )
if "use_cache" not in inputs:
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Any = model(**_A )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_lowerCAmelCase : List[Any] = (
getattr(_A ,'decoder_layers' ,_A )
or getattr(_A ,'num_decoder_layers' ,_A )
or config.num_hidden_layers
)
_lowerCAmelCase : Dict = getattr(_A ,'num_kv_heads' ,config.num_attention_heads )
_lowerCAmelCase : Any = getattr(_A ,'d_model' ,config.hidden_size )
_lowerCAmelCase : Dict = embed_dim // num_attention_heads
_lowerCAmelCase : Optional[Any] = outputs['past_key_values']
self.assertEqual(len(_A ) ,_A )
_lowerCAmelCase : Any = inputs['input_ids'].shape
for i in range(_A ):
if config.new_decoder_architecture:
_lowerCAmelCase : int = config.num_attention_heads
elif config.multi_query:
_lowerCAmelCase : Tuple = 1
self.assertEqual(len(past_kv[0] ) ,2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
_lowerCAmelCase : int = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(_A )
_lowerCAmelCase : str = tokenizer('My favorite food is' ,return_tensors='pt' ).to(_A )
_lowerCAmelCase : int = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
_lowerCAmelCase : Optional[int] = model.generate(**_A ,do_sample=_A ,max_new_tokens=19 )
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(_A )[0]
self.assertEqual(_A ,_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_A )
_lowerCAmelCase : Optional[Any] = FalconForCausalLM.from_pretrained(_A )
model.eval()
model.to(_A )
_lowerCAmelCase : Optional[Any] = tokenizer('My favorite food is' ,return_tensors='pt' ).to(_A )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_A ,do_sample=_A ,max_new_tokens=4 )
model.generate(**_A ,do_sample=_A ,max_new_tokens=4 )
model.generate(**_A ,num_beams=2 ,max_new_tokens=4 )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_A )
_lowerCAmelCase : List[Any] = FalconForCausalLM.from_pretrained(_A )
model.eval()
model.to(device=_A )
_lowerCAmelCase : Optional[Any] = tokenizer('My favorite food is' ,return_tensors='pt' ).to(_A )
# Test results are the same with and without cache
_lowerCAmelCase : Optional[int] = model.generate(**_A ,do_sample=_A ,max_new_tokens=20 ,use_cache=_A )
_lowerCAmelCase : Union[str, Any] = model.generate(**_A ,do_sample=_A ,max_new_tokens=20 ,use_cache=_A )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 711 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter ( logging.LoggerAdapter ):
    @staticmethod
    def _should_log( main_process_only ):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self ,level ,msg ,*args ,**kwargs ):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' ,True )
        in_order = kwargs.pop('in_order' ,False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg, kwargs = self.process(msg ,kwargs )
                self.logger.log(level ,msg ,*args ,**kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg ,kwargs )
                        self.logger.log(level ,msg ,*args ,**kwargs )
                    state.wait_for_everyone()
def get_logger( name ,log_level = None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
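
# Minimal usage sketch (assumes an `Accelerator`/`PartialState` has been created):
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("logged once, on the main process only")
#     logger.info("logged on every process", main_process_only=False)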
| 16 | 0 |
def largest_square_area_in_matrix_top_down(rows , cols , mat ):
    '''simple docstring'''
    def update_area_of_max_square(row , col ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            # a cell extends a square only as far as its right, diagonal and
            # lower neighbours all allow
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_memoization(rows , cols , mat ):
    '''simple docstring'''
    def update_area_of_max_square_using_dp_array(
        row , col , dp_array ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows , cols , mat ):
    '''simple docstring'''
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows , cols , mat ):
    '''simple docstring'''
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
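    # Worked example for the call above: dp[1][1] = dp[1][0] = dp[0][1] = 1 and
    # dp[0][0] = 1 + min(1, 1, 1) = 2, so it prints 2, the side length of the
    # largest all-ones square (despite "area" in the name).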
| 712 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
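# Illustrative usage with the upstream DPR classes (names assumed from the
# `transformers` library; this module's own class names are placeholders):
#
#     from transformers import DPRReader, DPRReaderTokenizer
#     tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
#     model = DPRReader.from_pretrained('facebook/dpr-reader-single-nq-base')
#     encoded_inputs = tokenizer(
#         questions=['What is love ?'],
#         titles=['Haddaway'],
#         texts=['"What Is Love" is a song recorded by the artist Haddaway'],
#         return_tensors='pt',
#     )
#     outputs = model(**encoded_inputs)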
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
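        # Decoding strategy: rank passages by relevance logit, then within each
        # passage keep the highest-scoring non-overlapping start/end spans,
        # stopping once `num_spans` predictions have been collected.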
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1] ),
                    ) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase : Tuple = sorted(_A ,key=lambda _A : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=13 ,_A=10 ,_A=3 ,_A=2 ,_A=2 ,_A=True ,_A=True ,_A=32 ,_A=5 ,_A=4 ,_A=37 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=10 ,_A=0.0_2 ,_A="divided_space_time" ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : Dict = patch_size
_lowerCAmelCase : int = num_frames
_lowerCAmelCase : Any = is_training
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : List[Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_type
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Optional[int] = scope
_lowerCAmelCase : Any = num_labels
        # in TimeSformer, the total number of tokens equals num_frames * num_patches per frame + 1 CLS token
_lowerCAmelCase : str = (image_size // patch_size) ** 2
_lowerCAmelCase : List[Any] = (num_frames) * self.num_patches_per_frame + 1
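        # With the defaults above (image_size=10, patch_size=2, num_frames=2):
        # num_patches_per_frame = (10 // 2) ** 2 = 25 and seq_length = 2 * 25 + 1 = 51.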
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
_lowerCAmelCase : List[str] = self.num_labels
return config
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TimesformerModel(config=_A )
model.to(_A )
model.eval()
_lowerCAmelCase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = TimesformerForVideoClassification(_A )
model.to(_A )
model.eval()
_lowerCAmelCase : Optional[Any] = model(_A )
# verify the logits shape
_lowerCAmelCase : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TimesformerModelTester(self )
_lowerCAmelCase : Optional[int] = ConfigTester(
self ,config_class=_A ,has_text_modality=_A ,hidden_size=37 )
def __lowerCamelCase ( self ,_A ,_A ,_A=False ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = copy.deepcopy(_A )
if return_labels:
if model_class in get_values(_A ):
_lowerCAmelCase : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_A )
return inputs_dict
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCAmelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A ,nn.Linear ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(_A )
_lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = TimesformerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[str] = True
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = self.model_tester.seq_length
_lowerCAmelCase : Union[str, Any] = self.model_tester.num_frames
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : str = False
_lowerCAmelCase : Union[str, Any] = True
_lowerCAmelCase : List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.attentions
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : str = outputs.attentions
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
_lowerCAmelCase : Optional[Any] = len(_A )
# Check attention is always last and order is fine
_lowerCAmelCase : int = True
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Dict = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : str = model(**self._prepare_for_class(_A ,_A ) )
self.assertEqual(out_len + 1 ,len(_A ) )
_lowerCAmelCase : Tuple = outputs.attentions
self.assertEqual(len(_A ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : Union[str, Any] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : List[Any] = outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_A ) ,_A )
_lowerCAmelCase : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : str = True
check_hidden_states_output(_A ,_A ,_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
_lowerCAmelCase : Any = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
_A )
_lowerCAmelCase : List[Any] = self.default_image_processor
_lowerCAmelCase : Tuple = prepare_video()
_lowerCAmelCase : int = image_processor(video[:8] ,return_tensors='pt' ).to(_A )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[int] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Optional[Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_A ,atol=1E-4 ) )
| 713 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = DanceDiffusionPipeline
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
_UpperCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = UNetaDModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=1_6000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=_A,
            use_timestep_embedding=_A,
            time_embedding_type='fourier',
            mid_block_type='UNetMidBlock1D',
            down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D'),
            up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip'),
        )
_lowerCAmelCase : int = IPNDMScheduler()
_lowerCAmelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : str = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : int = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = DanceDiffusionPipeline(**_A )
_lowerCAmelCase : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : List[str] = pipe(**_A )
_lowerCAmelCase : List[Any] = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_lowerCAmelCase : Optional[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __lowerCamelCase ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def __lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = torch_device
_lowerCAmelCase : int = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
        _lowerCAmelCase : int = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        _lowerCAmelCase : Optional[int] = torch.manual_seed(0 )
        _lowerCAmelCase : str = pipe(generator=generator ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : str = output.audios
_lowerCAmelCase : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : Union[str, Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = torch_device
_lowerCAmelCase : Tuple = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' ,torch_dtype=torch.floataa )
        _lowerCAmelCase : Optional[int] = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
        _lowerCAmelCase : Optional[int] = pipe(generator=generator ,num_inference_steps=100 ,audio_length_in_s=4.0_9_6 )
_lowerCAmelCase : Union[str, Any] = output.audios
_lowerCAmelCase : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowerCAmelCase : List[str] = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
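# Hedged stand-alone sketch (not part of the original tests): exercising the
# pipeline the same way the slow test does, with fewer steps for speed. Assumes
# the 'harmonai/maestro-150k' checkpoint referenced above is reachable.
if __name__ == "__main__":
    demo_pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
    demo_pipe.set_progress_bar_config(disable=None )
    demo_output = demo_pipe(generator=torch.manual_seed(0 ) ,num_inference_steps=10 ,audio_length_in_s=4.096 )
    print(demo_output.audios.shape )  # (batch, channels, sample_count)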
| 16 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self ,_A ,_A=7 ,_A=3 ,_A=30 ,_A=400 ,_A=True ,_A=None ,_A=0.9 ,_A=None ,_A=True ,_A=[0.5, 0.5, 0.5] ,_A=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = size if size is not None else {'shortest_edge': 30}
_lowerCAmelCase : Any = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_lowerCAmelCase : List[str] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : str = min_resolution
_lowerCAmelCase : List[str] = max_resolution
_lowerCAmelCase : Any = do_resize_and_center_crop
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : List[Any] = crop_pct
_lowerCAmelCase : Optional[Any] = crop_size
_lowerCAmelCase : Optional[Any] = do_normalize
_lowerCAmelCase : Optional[int] = image_mean
_lowerCAmelCase : Optional[int] = image_std
def __lowerCamelCase ( self ):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = PoolFormerImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'do_resize_and_center_crop' ) )
        self.assertTrue(hasattr(image_processing ,'size' ) )
        self.assertTrue(hasattr(image_processing ,'crop_pct' ) )
        self.assertTrue(hasattr(image_processing ,'do_normalize' ) )
        self.assertTrue(hasattr(image_processing ,'image_mean' ) )
        self.assertTrue(hasattr(image_processing ,'image_std' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size ,{'height': 30, 'width': 30} )
_lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
        _lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image ,Image.Image )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
        _lowerCAmelCase : Optional[Any] = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        _lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,np.ndarray )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
        _lowerCAmelCase : Any = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        _lowerCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=False ,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image ,torch.Tensor )
# Test not batched input
_lowerCAmelCase : List[str] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
        _lowerCAmelCase : List[Any] = image_processing(image_inputs ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
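# Quick smoke check outside the unittest harness (an illustrative sketch; the
# processor kwargs mirror the tester defaults above):
if __name__ == "__main__" and is_torch_available() and is_vision_available():
    demo_processor = PoolFormerImageProcessor(
        size={'shortest_edge': 30} ,crop_size={'height': 30, 'width': 30}
    )
    demo_image = Image.fromarray(np.zeros((40, 40, 3) ,dtype=np.uint8 ) )
    demo_pixels = demo_processor(demo_image ,return_tensors='pt' ).pixel_values
    print(demo_pixels.shape )  # torch.Size([1, 3, 30, 30])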
| 714 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = (UniPCMultistepScheduler,)
_UpperCAmelCase = (("num_inference_steps", 25),)
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = {
'num_train_timesteps': 1000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_A )
return config
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Optional[Any] = self.dummy_sample
_lowerCAmelCase : Union[str, Any] = 0.1 * sample
_lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
_lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase, _lowerCAmelCase : str = sample, sample
            for t in range(time_step ,time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Dict = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=0 ,**_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_lowerCAmelCase : List[str] = kwargs.pop('num_inference_steps' ,_A )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Dict = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Any = self.get_scheduler_config()
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
_lowerCAmelCase : int = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : str = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : Optional[int] = scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
_lowerCAmelCase : Union[str, Any] = new_scheduler.step(_A ,_A ,_A ,**_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowerCamelCase ( self ,_A=None ,**_A ):
'''simple docstring'''
if scheduler is None:
_lowerCAmelCase : int = self.scheduler_classes[0]
_lowerCAmelCase : List[str] = self.get_scheduler_config(**_A )
_lowerCAmelCase : Union[str, Any] = scheduler_class(**_A )
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : int = scheduler_class(**_A )
_lowerCAmelCase : List[str] = 10
_lowerCAmelCase : str = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            _lowerCAmelCase : Any = model(sample ,t )
            _lowerCAmelCase : Union[str, Any] = scheduler.step(residual ,t ,sample ).prev_sample
return sample
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs )
        _lowerCAmelCase : Any = kwargs.pop('num_inference_steps' ,None )
        for scheduler_class in self.scheduler_classes:
            _lowerCAmelCase : str = self.get_scheduler_config()
            _lowerCAmelCase : List[str] = scheduler_class(**scheduler_config )
            _lowerCAmelCase : Any = self.dummy_sample
            _lowerCAmelCase : Tuple = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler ,'set_timesteps' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler ,'set_timesteps' ):
                _lowerCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
_lowerCAmelCase : Any = scheduler.timesteps[5]
_lowerCAmelCase : List[str] = scheduler.timesteps[6]
            _lowerCAmelCase : List[str] = scheduler.step(residual ,time_step_0 ,sample ,**kwargs ).prev_sample
            _lowerCAmelCase : Optional[int] = scheduler.step(residual ,time_step_1 ,sample ,**kwargs ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = UniPCMultistepScheduler(**self.get_scheduler_config() )
        _lowerCAmelCase : Optional[Any] = self.full_loop(scheduler=scheduler )
        _lowerCAmelCase : Tuple = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
        _lowerCAmelCase : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        _lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
        _lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
        _lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
        _lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=scheduler )
        _lowerCAmelCase : List[str] = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __lowerCamelCase ( self ):
'''simple docstring'''
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,solver_order=order ,solver_type=solver_type ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def __lowerCamelCase ( self ):
'''simple docstring'''
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,)
                    _lowerCAmelCase : List[Any] = self.full_loop(
                        solver_order=order ,solver_type=solver_type ,prediction_type=prediction_type ,)
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def __lowerCamelCase ( self ):
'''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def __lowerCamelCase ( self ):
'''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps ,time_step=0 )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.full_loop()
        _lowerCAmelCase : Tuple = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.full_loop(prediction_type='v_prediction' )
        _lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCAmelCase : int = self.get_scheduler_config(thresholding=True ,dynamic_thresholding_ratio=0 )
        _lowerCAmelCase : Tuple = scheduler_class(**scheduler_config )
        _lowerCAmelCase : Optional[Any] = 10
        _lowerCAmelCase : Union[str, Any] = self.dummy_model()
        _lowerCAmelCase : Dict = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            _lowerCAmelCase : Tuple = model(sample ,t )
            _lowerCAmelCase : Dict = scheduler.step(residual ,t ,sample ).prev_sample
assert sample.dtype == torch.floataa
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Dict = self.get_scheduler_config(**_A )
_lowerCAmelCase : str = scheduler_class(**_A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
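# Stand-alone sanity sketch (illustrative; no real denoiser is involved, a zero
# tensor stands in for the model output at every step):
if __name__ == "__main__":
    demo_scheduler = UniPCMultistepScheduler(
        num_train_timesteps=1000 ,solver_order=2 ,solver_type='bh2'
    )
    demo_scheduler.set_timesteps(10 )
    demo_sample = torch.randn(1 ,3 ,8 ,8 )
    for demo_t in demo_scheduler.timesteps:
        demo_residual = torch.zeros_like(demo_sample )  # stand-in for a model output
        demo_sample = demo_scheduler.step(demo_residual ,demo_t ,demo_sample ).prev_sample
    print(demo_sample.shape )  # torch.Size([1, 3, 8, 8])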
| 16 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
_lowerCAmelCase : Tuple = DetaConfig(
backbone_config=_lowerCamelCase , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=_lowerCamelCase , with_box_refine=_lowerCamelCase , two_stage=_lowerCamelCase , )
# set labels
_lowerCAmelCase : Optional[int] = 'huggingface/label-files'
if "o365" in model_name:
_lowerCAmelCase : int = 366
_lowerCAmelCase : Tuple = 'object365-id2label.json'
else:
_lowerCAmelCase : List[Any] = 91
_lowerCAmelCase : str = 'coco-detection-id2label.json'
_lowerCAmelCase : Any = num_labels
    _lowerCAmelCase : int = json.load(open(cached_download(hf_hub_url(repo_id ,filename ,repo_type='dataset' ) ) ,'r' ) )
    _lowerCAmelCase : int = {int(k ): v for k, v in idalabel.items()}
_lowerCAmelCase : Optional[int] = idalabel
_lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = val
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCAmelCase : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCAmelCase : Optional[int] = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_lowerCAmelCase : Union[str, Any] = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
            _lowerCAmelCase : Optional[int] = in_proj_weight[:dim, :]
            _lowerCAmelCase : List[str] = in_proj_bias[:dim]
            _lowerCAmelCase : str = in_proj_weight[dim : dim * 2, :]
            _lowerCAmelCase : List[str] = in_proj_bias[dim : dim * 2]
            _lowerCAmelCase : List[Any] = in_proj_weight[-dim:, :]
            _lowerCAmelCase : Any = in_proj_bias[-dim:]
# fmt: on
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_lowerCAmelCase : Any = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Union[str, Any] = in_proj_weight[:hidden_size, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[:hidden_size]
        _lowerCAmelCase : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
        _lowerCAmelCase : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCAmelCase : Union[str, Any] = in_proj_weight[-hidden_size:, :]
_lowerCAmelCase : List[Any] = in_proj_bias[-hidden_size:]
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = get_deta_config(_lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
_lowerCAmelCase : List[str] = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
_lowerCAmelCase : Tuple = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
_lowerCAmelCase : Dict = torch.load(_lowerCamelCase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(_lowerCamelCase , param.shape )
# rename keys
    _lowerCAmelCase : str = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_swin_q_k_v(state_dict ,config.backbone_config )
    read_in_decoder_q_k_v(state_dict ,config )
# fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            _lowerCAmelCase : Dict = state_dict.pop(key )
            _lowerCAmelCase : List[Any] = val
        if "input_proj" in key:
            _lowerCAmelCase : Optional[int] = state_dict.pop(key )
            _lowerCAmelCase : Optional[Any] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            _lowerCAmelCase : Any = state_dict.pop(key )
            _lowerCAmelCase : int = val
# finally, create HuggingFace model and load state dict
    _lowerCAmelCase : List[str] = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    _lowerCAmelCase : Optional[int] = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device )
    # load image processor
    _lowerCAmelCase : int = DetaImageProcessor(format='coco_detection' )
    # verify our conversion on image
    _lowerCAmelCase : Optional[int] = prepare_img()
    _lowerCAmelCase : str = processor(images=img ,return_tensors='pt' )
    _lowerCAmelCase : str = encoding['pixel_values']
    _lowerCAmelCase : str = model(pixel_values.to(device ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_lowerCAmelCase : int = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_lowerCAmelCase : Optional[int] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_lowerCAmelCase : int = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1e-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
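# Example invocation (illustrative; the script filename below is an assumption,
# only the flags are defined in this file):
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub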
| 715 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = """https://openaipublic.azureedge.net/jukebox/models/"""
_lowerCAmelCase = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : int = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_lowerCAmelCase : Tuple = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : Dict = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_lowerCAmelCase : str = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_lowerCAmelCase : Any = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {}
import re
_lowerCAmelCase : Optional[Any] = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Union[str, Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Tuple = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : Optional[int] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_lowerCAmelCase : Dict = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_lowerCAmelCase : List[str] = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : int = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Optional[int] = prefix + resnet_block
_lowerCAmelCase : Dict = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : str = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Dict = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_lowerCAmelCase : Any = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Dict = regex_match.groups()
_lowerCAmelCase : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Dict = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : int = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_lowerCAmelCase : Optional[int] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : str = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_lowerCAmelCase : str = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Any = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Any = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_lowerCAmelCase : List[str] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : Tuple = regex_match.groups()
_lowerCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = {'1': 1, '3': 2}[groups[-2]]
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_lowerCAmelCase : List[str] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : List[str] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Dict = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_lowerCAmelCase : Dict = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[Any] = original_key
_lowerCAmelCase : List[Any] = replace_key(_lowerCamelCase )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
_lowerCAmelCase : Dict = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Union[str, Any] = original_key
_lowerCAmelCase : Optional[Any] = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
            _lowerCAmelCase : str = requests.get(f"""{PREFIX}{file}""" ,allow_redirects=True )
            os.makedirs(f"""{pytorch_dump_folder_path}/""" ,exist_ok=True )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , 'wb' ).write(r.content )
_lowerCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
_lowerCAmelCase : Optional[Any] = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[str] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : int = []
_lowerCAmelCase : Any = {}
    for i, dict_name in enumerate(model_to_convert ):
_lowerCAmelCase : Union[str, Any] = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['model']
_lowerCAmelCase : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_lowerCAmelCase : int = old_dic[k]
elif k.endswith('.w' ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Optional[Any] = old_dic[k]
_lowerCAmelCase : List[str] = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
        _lowerCAmelCase : Tuple = fix_jukebox_keys(new_dic ,model.state_dict() ,key_prefix ,mapping )
        weight_dict.append(new_dic )
    _lowerCAmelCase : List[Any] = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f"""{pytorch_dump_folder_path}/mapping.json""" ,'w' ) as txtfile:
        json.dump(mapping ,txtfile )
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
return weight_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
_lowerCAmelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
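# Example invocation (illustrative; the script filename below is an assumption,
# only the flags are defined in this file):
#
#   python convert_jukebox.py \
#       --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted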
| 16 | 0 |
"""simple docstring"""
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCAmelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : int = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase : Any = get_sagemaker_input()
else:
_lowerCAmelCase : Optional[Any] = get_cluster_input()
return config
def lowerCamelCase__ ( _lowerCamelCase=None ):
'''simple docstring'''
if subparsers is not None:
        _lowerCAmelCase : List[Any] = subparsers.add_parser('config' ,description=description )
    else:
        _lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser('Accelerate config command' ,description=description )
    parser.add_argument(
        '--config_file' ,default=None ,help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = get_user_input()
if args.config_file is not None:
_lowerCAmelCase : str = args.config_file
else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        _lowerCAmelCase : int = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
print(f"""accelerate configuration saved at {config_file}""" )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = config_command_parser()
_lowerCAmelCase : Dict = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
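# Typical flow (illustrative; the exact prompt text and module path are
# assumptions): running this module interactively collects answers via
# `get_cluster_input`/`get_sagemaker_input` and serializes them, e.g.
#
#   $ accelerate config
#   In which compute environment are you running?  -> This machine
#   ...
#   accelerate configuration saved at <default_yaml_config_file>
#
# Passing `--config_file my_config.json` writes the same answers as JSON
# instead, via the `config_file.endswith('.json')` branch above.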
| 716 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase = {"""UserAgent""": UserAgent().random}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = script.contents[0]
_lowerCAmelCase : Union[str, Any] = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = F"""https://www.instagram.com/{username}/"""
_lowerCAmelCase : str = self.get_json()
def __lowerCamelCase ( self ):
'''simple docstring'''
        _lowerCAmelCase : str = requests.get(self.url ,headers=headers ).text
        _lowerCAmelCase : Optional[Any] = BeautifulSoup(html ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
'''simple docstring'''
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self ):
'''simple docstring'''
return F"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["username"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["full_name"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["biography"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["business_email"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["external_url"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_verified"]
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.user_data["is_private"]
def lowerCamelCase__ ( _lowerCamelCase = "github" ):
'''simple docstring'''
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
    _lowerCAmelCase : Tuple = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data ,dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
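# Programmatic usage (sketch): the same fields are available without the doctest
# scaffolding above, e.g.
#
#   user = InstagramUser('github' )
#   print(user.fullname ,user.number_of_followers )
#
# Note that this scrapes instagram.com directly, so a page-layout change can
# break `extract_user_profile` without notice.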
| 16 | 0 |
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if not sentence:
return ""
    _lowerCAmelCase : List[str] = dict(zip(ascii_lowercase ,ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
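# Example (sketch): only the first character is mapped through the
# lowercase-to-uppercase table; the rest of the sentence is left untouched.
#
#   >>> lowerCamelCase__('hello world' )
#   'Hello world'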
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
        _lowerCAmelCase : Optional[Any] = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                _lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 16 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_lowerCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
_UpperCAmelCase = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
_UpperCAmelCase = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __lowerCamelCase ( self ):
'''simple docstring'''
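        # Convert nested GenerationConfig values to plain dicts so the result
        # is JSON-serializable.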
_lowerCAmelCase : str = super().to_dict()
for k, v in d.items():
if isinstance(_A ,_A ):
_lowerCAmelCase : Tuple = v.to_dict()
return d
| 718 |
"""simple docstring"""
import argparse
import struct
import unittest
class __UpperCamelCase :
def __init__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = data
# Initialize hash values
_lowerCAmelCase : Any = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
_lowerCAmelCase : str = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
_lowerCAmelCase : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
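        # SHA-256 padding: append a single 0x80 byte, zero-fill until the length
        # is congruent to 56 mod 64, then append the original length in bits as
        # a 64-bit big-endian integer.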
_lowerCAmelCase : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
_lowerCAmelCase : Any = struct.pack('>Q' ,(len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
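        # Process the message in successive 512-bit (64-byte) chunks.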
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
_lowerCAmelCase : int = list(struct.unpack('>16L' ,_A ) )
            # extend with 48 zero words to complete the 64-entry message schedule
words += [0] * 48
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
_lowerCAmelCase : List[str] = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
_lowerCAmelCase : Tuple = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
_lowerCAmelCase : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
_lowerCAmelCase : Optional[Any] = self.ror(_A ,6 ) ^ self.ror(_A ,11 ) ^ self.ror(_A ,25 )
_lowerCAmelCase : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
_lowerCAmelCase : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
_lowerCAmelCase : Union[str, Any] = self.ror(_A ,2 ) ^ self.ror(_A ,13 ) ^ self.ror(_A ,22 )
_lowerCAmelCase : Any = (a & b) ^ (a & c) ^ (b & c)
_lowerCAmelCase : Any = (sa + maj) % 0x1_0000_0000
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
_lowerCAmelCase : Any = [a, b, c, d, e, f, g, h]
# Modify final values
_lowerCAmelCase : int = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
_lowerCAmelCase : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
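        # Right-rotate a 32-bit word by the given number of bit positions.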
return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
import hashlib
_lowerCAmelCase : Any = bytes('Test String' ,'utf-8' )
self.assertEqual(SHAaaa(_A ).hash ,hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_lowerCAmelCase : Tuple = parser.parse_args()
_lowerCAmelCase : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_lowerCAmelCase : int = f.read()
else:
_lowerCAmelCase : int = bytes(_lowerCamelCase , 'utf-8' )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
| 16 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """spiece.model"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
}
}
_lowerCAmelCase = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
# Segments (not really needed)
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = 3
_lowerCAmelCase = 4
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = "left"
def __init__( self ,_A ,_A=False ,_A=True ,_A=False ,_A="<s>" ,_A="</s>" ,_A="<unk>" ,_A="<sep>" ,_A="<pad>" ,_A="<cls>" ,_A="<mask>" ,_A=["<eop>", "<eod>"] ,_A = None ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(_A ,lstrip=_A ,rstrip=_A ) if isinstance(_A ,_A ) else mask_token
_lowerCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A ,remove_space=_A ,keep_accents=_A ,bos_token=_A ,eos_token=_A ,unk_token=_A ,sep_token=_A ,pad_token=_A ,cls_token=_A ,mask_token=_A ,additional_special_tokens=_A ,sp_model_kwargs=self.sp_model_kwargs ,**_A ,)
_lowerCAmelCase : int = 3
_lowerCAmelCase : Union[str, Any] = do_lower_case
_lowerCAmelCase : Dict = remove_space
_lowerCAmelCase : int = keep_accents
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.__dict__.copy()
_lowerCAmelCase : List[str] = None
return state
def __setstate__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.remove_space:
_lowerCAmelCase : str = ' '.join(inputs.strip().split() )
else:
_lowerCAmelCase : Dict = inputs
_lowerCAmelCase : List[str] = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' )
if not self.keep_accents:
_lowerCAmelCase : Optional[Any] = unicodedata.normalize('NFKD' ,_A )
_lowerCAmelCase : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] )
if self.do_lower_case:
_lowerCAmelCase : Tuple = outputs.lower()
return outputs
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.preprocess_text(_A )
_lowerCAmelCase : int = self.sp_model.encode(_A ,out_type=_A )
_lowerCAmelCase : int = []
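        # SentencePiece keeps a digit followed by a comma (e.g. "9,") as one
        # piece; split the trailing comma off so numbers tokenize consistently.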
for piece in pieces:
if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A ,'' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase : int = cur_pieces[1:]
else:
_lowerCAmelCase : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_A )
else:
new_pieces.append(_A )
return new_pieces
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.PieceToId(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.sp_model.IdToPiece(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ''.join(_A ).replace(_A ,' ' ).strip()
return out_string
def __lowerCamelCase ( self ,_A ,_A = False ,_A = None ,_A = True ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : Dict = kwargs.pop('use_source_tokenizer' ,_A )
_lowerCAmelCase : Dict = self.convert_ids_to_tokens(_A ,skip_special_tokens=_A )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
_lowerCAmelCase : Tuple = []
sub_texts.append(_A )
else:
current_sub_text.append(_A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase : List[Any] = ''.join(_A )
_lowerCAmelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : int = self.clean_up_tokenization(_A )
return clean_text
else:
return text
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCamelCase ( self ,_A ,_A = None ,_A = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A ,token_ids_a=_A ,already_has_special_tokens=_A )
if token_ids_a is not None:
return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1]
return ([0] * len(_A )) + [1, 1]
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_A )
elif not os.path.isfile(self.vocab_file ):
with open(_A ,'wb' ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 719 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self ,_A = None ):
'''simple docstring'''
_lowerCAmelCase : list = []
# Stores indexes of each item for supporting updates and deletion.
_lowerCAmelCase : dict = {}
# Stores current size of heap.
_lowerCAmelCase : Union[str, Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        _lowerCAmelCase : Union[str, Any] = key or (lambda x : x)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Tuple = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_lowerCAmelCase, _lowerCAmelCase : Tuple = self.arr[j], self.arr[i]
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
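        # Among index i and its two children, return the index whose item
        # should sit highest according to the comparison function.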
_lowerCAmelCase : Tuple = self._left(_A )
_lowerCAmelCase : str = self._right(_A )
_lowerCAmelCase : Tuple = i
if left is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : int = left
if right is not None and not self._cmp(_A ,_A ):
_lowerCAmelCase : Optional[int] = right
return valid_parent
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self._parent(_A )
while parent is not None and not self._cmp(_A ,_A ):
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : List[str] = parent, self._parent(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._get_valid_parent(_A )
while valid_parent != index:
self._swap(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : int = self.pos_map[item]
_lowerCAmelCase : Dict = [item, self.key(_A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if item not in self.pos_map:
return
_lowerCAmelCase : List[str] = self.pos_map[item]
del self.pos_map[item]
_lowerCAmelCase : Dict = self.arr[self.size - 1]
_lowerCAmelCase : Optional[Any] = index
self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(_A )
self._heapify_down(_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
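        # Reuse a previously freed slot at the end of the backing list if one
        # exists; otherwise grow the list.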
_lowerCAmelCase : Tuple = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_A )] )
else:
_lowerCAmelCase : Any = [item, self.key(_A )]
_lowerCAmelCase : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowerCamelCase ( self ):
'''simple docstring'''
return self.arr[0] if self.size else None
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowerCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 | 0 |
"""simple docstring"""
import pytest
_lowerCAmelCase = """__dummy_dataset1__"""
_lowerCAmelCase = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def lowerCamelCase__ ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowerCamelCase__ ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
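    # Write the dummy dataset loading script to tmp_path/datasets/<name>/<name>.py
    # and return its path as a string.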
_lowerCAmelCase : Optional[int] = dataset_loading_script_name
_lowerCAmelCase : Union[str, Any] = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=_lowerCamelCase )
_lowerCAmelCase : Any = script_dir / f"""{script_name}.py"""
with open(_lowerCamelCase , 'w' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
| 720 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = 42
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 32 ,_A = 64 ,_A = 20 ,_A = 768 ,_A=77 ,_A=4 ,_A = 0.0 ,_A = "silu" ,_A = None ,_A = None ,_A = "linear" ,_A = "prd" ,_A = None ,_A = None ,_A = None ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = attention_head_dim
_lowerCAmelCase : Tuple = num_attention_heads * attention_head_dim
_lowerCAmelCase : Optional[Any] = additional_embeddings
_lowerCAmelCase : Union[str, Any] = time_embed_dim or inner_dim
_lowerCAmelCase : Union[str, Any] = embedding_proj_dim or embedding_dim
_lowerCAmelCase : Optional[int] = clip_embed_dim or embedding_dim
_lowerCAmelCase : int = Timesteps(_A ,_A ,0 )
_lowerCAmelCase : int = TimestepEmbedding(_A ,_A ,out_dim=_A ,act_fn=_A )
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
if embedding_proj_norm_type is None:
_lowerCAmelCase : Optional[Any] = None
elif embedding_proj_norm_type == "layer":
_lowerCAmelCase : List[Any] = nn.LayerNorm(_A )
else:
raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
_lowerCAmelCase : Tuple = nn.Linear(_A ,_A )
if encoder_hid_proj_type is None:
_lowerCAmelCase : int = None
elif encoder_hid_proj_type == "linear":
_lowerCAmelCase : List[Any] = nn.Linear(_A ,_A )
else:
raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_A ) )
if added_emb_type == "prd":
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,1 ,_A ) )
elif added_emb_type is None:
_lowerCAmelCase : List[Any] = None
else:
raise ValueError(
F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
_lowerCAmelCase : List[Any] = nn.ModuleList(
[
BasicTransformerBlock(
_A ,_A ,_A ,dropout=_A ,activation_fn='gelu' ,attention_bias=_A ,)
for d in range(_A )
] )
if norm_in_type == "layer":
_lowerCAmelCase : Any = nn.LayerNorm(_A )
elif norm_in_type is None:
_lowerCAmelCase : Any = None
else:
raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
_lowerCAmelCase : Union[str, Any] = nn.LayerNorm(_A )
_lowerCAmelCase : int = nn.Linear(_A ,_A )
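        # Additive causal mask: large negative values above the diagonal block
        # attention to future positions once added to the attention scores.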
_lowerCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
_lowerCAmelCase : Tuple = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' ,_A ,persistent=_A )
_lowerCAmelCase : Tuple = nn.Parameter(torch.zeros(1 ,_A ) )
_lowerCAmelCase : Dict = nn.Parameter(torch.zeros(1 ,_A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {}
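        # Recursively walk the submodules and collect every attention
        # processor, keyed by its fully qualified module path.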
def fn_recursive_add_processors(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
_lowerCAmelCase : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" ,_A ,_A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A ,_A ,_A )
return processors
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_A ,_A ) and len(_A ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(_A )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(_A ,_A ,_A ):
if hasattr(_A ,'set_processor' ):
if not isinstance(_A ,_A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" ,_A ,_A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A = None ,_A = None ,_A = True ,):
'''simple docstring'''
_lowerCAmelCase : str = hidden_states.shape[0]
_lowerCAmelCase : int = timestep
if not torch.is_tensor(_A ):
_lowerCAmelCase : str = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[int] = timesteps * torch.ones(_A ,dtype=timesteps.dtype ,device=timesteps.device )
_lowerCAmelCase : Dict = self.time_proj(_A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCAmelCase : Any = timesteps_projected.to(dtype=self.dtype )
_lowerCAmelCase : Optional[Any] = self.time_embedding(_A )
if self.embedding_proj_norm is not None:
_lowerCAmelCase : int = self.embedding_proj_norm(_A )
_lowerCAmelCase : str = self.embedding_proj(_A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCAmelCase : str = self.encoder_hidden_states_proj(_A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCAmelCase : Any = self.proj_in(_A )
_lowerCAmelCase : Dict = self.positional_embedding.to(hidden_states.dtype )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[Any] = 0
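        # Assemble the transformer input sequence: optional encoder hidden
        # states, the projected embedding, the time embedding, the noisy
        # latents and, if configured, the learned PRD token.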
if encoder_hidden_states is not None:
additional_embeds.append(_A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCAmelCase : int = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCAmelCase : Any = hidden_states[:, None, :]
_lowerCAmelCase : int = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCAmelCase : List[str] = self.prd_embedding.to(hidden_states.dtype ).expand(_A ,-1 ,-1 )
additional_embeds.append(_A )
_lowerCAmelCase : List[str] = torch.cat(
_A ,dim=1 ,)
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCAmelCase : Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCAmelCase : Any = F.pad(
_A ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
_lowerCAmelCase : int = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCAmelCase : Optional[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
_lowerCAmelCase : Union[str, Any] = F.pad(_A ,(0, self.additional_embeddings) ,value=0.0 )
_lowerCAmelCase : Tuple = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCAmelCase : Optional[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
_lowerCAmelCase : Any = self.norm_in(_A )
for block in self.transformer_blocks:
_lowerCAmelCase : int = block(_A ,attention_mask=_A )
_lowerCAmelCase : Union[str, Any] = self.norm_out(_A )
if self.prd_embedding is not None:
_lowerCAmelCase : Optional[int] = hidden_states[:, -1]
else:
_lowerCAmelCase : Any = hidden_states[:, additional_embeddings_len:]
_lowerCAmelCase : Optional[int] = self.proj_to_clip_embeddings(_A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 16 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
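    # Transpose row-oriented records into one list of floats per column.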
_lowerCAmelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(_lowerCamelCase ):
if len(_lowerCamelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_lowerCamelCase ) )
return data_lists
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
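    # Min-max normalise each column; weight 1 rewards high raw values, while
    # weight 0 rewards low ones (score = 1 - normalised value).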
_lowerCAmelCase : list[list[float]] = []
for dlist, weight in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : List[str] = min(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = max(_lowerCamelCase )
_lowerCAmelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCAmelCase : Any = f"""Invalid weight of {weight:f} provided"""
raise ValueError(_lowerCamelCase )
score_lists.append(_lowerCamelCase )
return score_lists
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = final_scores[j] + ele
return final_scores
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = get_data(_lowerCamelCase )
_lowerCAmelCase : str = calculate_each_score(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Any = generate_final_scores(_lowerCamelCase )
# append scores to source data
for i, ele in enumerate(_lowerCamelCase ):
source_data[i].append(_lowerCamelCase )
return source_data
| 721 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCAmelCase = get_logger()
_lowerCAmelCase = None
class __UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self ,_A=None ,_A=None ,**_A ):
'''simple docstring'''
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A ,_A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
                'is not serializable with either `pickle` or `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
_lowerCAmelCase : int = device if isinstance(_A ,_A ) else str(jax.devices()[0] )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_lowerCAmelCase : List[str] = str(jax.devices()[0] )
_lowerCAmelCase : int = jnp_array_kwargs
@staticmethod
def __lowerCamelCase ( ):
'''simple docstring'''
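        # Map each available device's string identifier to the device object.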
import jax
return {str(_A ): device for device in jax.devices()}
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
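        # Stack a homogeneous list of jax arrays (same shape and dtype) into a
        # single array; heterogeneous columns are returned unchanged.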
if isinstance(_A ,_A ) and column:
if all(
isinstance(_A ,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A ,axis=0 )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(_A ,(str, bytes, type(_A )) ):
return value
elif isinstance(_A ,(np.character, np.ndarray) ) and np.issubdtype(value.dtype ,np.character ):
return value.tolist()
_lowerCAmelCase : Optional[Any] = {}
if isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCAmelCase : List[str] = {'dtype': jnp.intaa}
else:
_lowerCAmelCase : Tuple = {'dtype': jnp.intaa}
elif isinstance(_A ,(np.number, np.ndarray) ) and np.issubdtype(value.dtype ,np.floating ):
_lowerCAmelCase : Any = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A ,PIL.Image.Image ):
_lowerCAmelCase : int = np.asarray(_A )
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCAmelCase : Optional[Any] = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A ,**{**default_dtype, **self.jnp_array_kwargs} )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A ,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A ,'__array__' ) and not isinstance(_A ,jax.Array ):
_lowerCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A ,np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A ,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return map_nested(self._recursive_tensorize ,_A ,map_list=_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_row(_A )
_lowerCAmelCase : int = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.numpy_arrow_extractor().extract_column(_A )
_lowerCAmelCase : List[Any] = self.python_features_decoder.decode_column(_A ,pa_table.column_names[0] )
_lowerCAmelCase : Optional[Any] = self.recursive_tensorize(_A )
_lowerCAmelCase : Optional[Any] = self._consolidate(_A )
return column
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.numpy_arrow_extractor().extract_batch(_A )
_lowerCAmelCase : Any = self.python_features_decoder.decode_batch(_A )
_lowerCAmelCase : str = self.recursive_tensorize(_A )
for column_name in batch:
_lowerCAmelCase : Optional[Any] = self._consolidate(batch[column_name] )
return batch
| 16 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be non-negative' )
_lowerCAmelCase : List[str] = str(bin(_lowerCamelCase ) )[2:] # remove the leading "0b"
_lowerCAmelCase : Optional[Any] = str(bin(_lowerCamelCase ) )[2:]
_lowerCAmelCase : Optional[int] = max(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
return "0b" + "".join(
str(int('1' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(_lowerCamelCase ) , b_binary.zfill(_lowerCamelCase ) ) )
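# Example: for a=25 (0b11001) and b=32 (0b100000) the function returns '0b111001'.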
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = ["vqvae"]
def __init__( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_A ,scheduler=_A ,mel=_A ,vqvae=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_A ) else 1000
@torch.no_grad()
def __call__( self ,_A = 1 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = None ,_A = 0 ,_A = 0 ,_A = None ,_A = 0 ,_A = None ,_A = None ,_A=True ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_A )
_lowerCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
_lowerCAmelCase : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
_lowerCAmelCase : Optional[Any] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_A ,device=self.device ,)
_lowerCAmelCase : Dict = noise
_lowerCAmelCase : Optional[Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_A ,_A )
_lowerCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_A )
_lowerCAmelCase : int = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
_lowerCAmelCase : int = (input_image / 255) * 2 - 1
_lowerCAmelCase : str = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
_lowerCAmelCase : List[Any] = self.vqvae.encode(torch.unsqueeze(_A ,0 ) ).latent_dist.sample(
generator=_A )[0]
_lowerCAmelCase : Tuple = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
_lowerCAmelCase : List[Any] = self.scheduler.add_noise(_A ,_A ,self.scheduler.timesteps[start_step - 1] )
_lowerCAmelCase : Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
_lowerCAmelCase : Optional[Any] = int(mask_start_secs * pixels_per_second )
_lowerCAmelCase : Optional[int] = int(mask_end_secs * pixels_per_second )
_lowerCAmelCase : int = self.scheduler.add_noise(_A ,_A ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_A ):
_lowerCAmelCase : str = self.unet(_A ,_A ,_A )['sample']
else:
_lowerCAmelCase : Any = self.unet(_A ,_A )['sample']
if isinstance(self.scheduler ,_A ):
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,eta=_A ,generator=_A ,)['prev_sample']
else:
_lowerCAmelCase : Any = self.scheduler.step(
model_output=_A ,timestep=_A ,sample=_A ,generator=_A ,)['prev_sample']
if mask is not None:
if mask_start > 0:
_lowerCAmelCase : Any = mask[:, step, :, :mask_start]
if mask_end > 0:
_lowerCAmelCase : Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
_lowerCAmelCase : Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
_lowerCAmelCase : Any = self.vqvae.decode(_A )['sample']
_lowerCAmelCase : Any = (images / 2 + 0.5).clamp(0 ,1 )
_lowerCAmelCase : Tuple = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
_lowerCAmelCase : Any = (images * 255).round().astype('uint8' )
_lowerCAmelCase : Any = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_A ,mode='RGB' ).convert('L' ) for _ in images) )
_lowerCAmelCase : Dict = [self.mel.image_to_audio(_A ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_A ) )
@torch.no_grad()
def __lowerCamelCase ( self ,_A ,_A = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_A )
self.scheduler.set_timesteps(_A )
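        # Run the DDIM update rule in reverse (deterministic inversion) to
        # recover the noise that would generate the given images.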
_lowerCAmelCase : Dict = np.array(
[np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
_lowerCAmelCase : Dict = (sample / 255) * 2 - 1
_lowerCAmelCase : List[str] = torch.Tensor(_A ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
_lowerCAmelCase : Tuple = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
_lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t]
_lowerCAmelCase : Dict = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
_lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t
_lowerCAmelCase : Union[str, Any] = self.unet(_A ,_A )['sample']
_lowerCAmelCase : Optional[int] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
_lowerCAmelCase : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
_lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def __lowerCamelCase ( _A ,_A ,_A ):
'''simple docstring'''
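        # Spherical linear interpolation (slerp) between two noise tensors.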
_lowerCAmelCase : int = acos(torch.dot(torch.flatten(_A ) ,torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
| 16 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = "blenderbot-small"
_UpperCAmelCase = ["past_key_values"]
_UpperCAmelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self ,_A=5_0265 ,_A=512 ,_A=8 ,_A=2048 ,_A=16 ,_A=8 ,_A=2048 ,_A=16 ,_A=0.0 ,_A=0.0 ,_A=True ,_A=True ,_A="gelu" ,_A=512 ,_A=0.1 ,_A=0.0 ,_A=0.0 ,_A=0.0_2 ,_A=1 ,_A=False ,_A=0 ,_A=1 ,_A=2 ,_A=2 ,**_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : List[str] = d_model
_lowerCAmelCase : List[Any] = encoder_ffn_dim
_lowerCAmelCase : int = encoder_layers
_lowerCAmelCase : List[str] = encoder_attention_heads
_lowerCAmelCase : List[Any] = decoder_ffn_dim
_lowerCAmelCase : Dict = decoder_layers
_lowerCAmelCase : Tuple = decoder_attention_heads
_lowerCAmelCase : str = dropout
_lowerCAmelCase : Any = attention_dropout
_lowerCAmelCase : str = activation_dropout
_lowerCAmelCase : Dict = activation_function
_lowerCAmelCase : Optional[Any] = init_std
_lowerCAmelCase : Optional[Any] = encoder_layerdrop
_lowerCAmelCase : Tuple = decoder_layerdrop
_lowerCAmelCase : List[str] = use_cache
_lowerCAmelCase : Union[str, Any] = encoder_layers
_lowerCAmelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_A ,bos_token_id=_A ,eos_token_id=_A ,is_encoder_decoder=_A ,decoder_start_token_id=_A ,forced_eos_token_id=_A ,**_A ,)
class __UpperCamelCase ( a__ ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowerCAmelCase : str = {0: 'batch'}
_lowerCAmelCase : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_lowerCAmelCase : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
_lowerCAmelCase : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_A ,direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_lowerCAmelCase : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_lowerCAmelCase : Any = self.num_layers
for i in range(_A ):
_lowerCAmelCase : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
_lowerCAmelCase : int = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_lowerCAmelCase : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Union[str, Any] = super().outputs
else:
_lowerCAmelCase : Optional[int] = super(_A ,self ).outputs
if self.use_past:
_lowerCAmelCase : Optional[Any] = self.num_layers
for i in range(_A ):
_lowerCAmelCase : Any = {0: 'batch', 2: 'past_sequence + sequence'}
_lowerCAmelCase : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A ,_A ,_A ,_A ,_A )
# Generate decoder inputs
_lowerCAmelCase : List[str] = seq_length if not self.use_past else 1
_lowerCAmelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A ,_A ,_A ,_A ,_A )
_lowerCAmelCase : str = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
_lowerCAmelCase : Optional[int] = dict(**_A ,**_A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase : str = common_inputs['input_ids'].shape
_lowerCAmelCase : Tuple = common_inputs['decoder_input_ids'].shape[1]
_lowerCAmelCase : int = self.num_attention_heads
_lowerCAmelCase : Any = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_lowerCAmelCase : List[Any] = decoder_seq_length + 3
_lowerCAmelCase : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_lowerCAmelCase : Optional[Any] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_A ,_A )] ,dim=1 )
_lowerCAmelCase : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_lowerCAmelCase : Optional[Any] = self.num_layers
_lowerCAmelCase : List[Any] = min(_A ,_A )
_lowerCAmelCase : str = max(_A ,_A ) - min_num_layers
_lowerCAmelCase : Any = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
_lowerCAmelCase : Dict = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_A ,_A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A ,_A ,_A ,_A ,_A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_lowerCAmelCase : List[Any] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_lowerCAmelCase : str = seqlen + 2
_lowerCAmelCase : Dict = self.num_layers
_lowerCAmelCase : Any = self.num_attention_heads
_lowerCAmelCase : Optional[int] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
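            # Extend the attention mask over the fake past positions and create
            # zero-filled past key/value tensors for every layer.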
_lowerCAmelCase : Optional[int] = common_inputs['attention_mask'].dtype
_lowerCAmelCase : Dict = torch.cat(
[common_inputs['attention_mask'], torch.ones(_A ,_A ,dtype=_A )] ,dim=1 )
_lowerCAmelCase : str = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
_lowerCAmelCase : str = compute_effective_axis_dimension(
_A ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_lowerCAmelCase : Optional[int] = tokenizer.num_special_tokens_to_add(_A )
_lowerCAmelCase : str = compute_effective_axis_dimension(
_A ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
_lowerCAmelCase : str = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_lowerCAmelCase : Tuple = dict(tokenizer(_A ,return_tensors=_A ) )
return common_inputs
def __lowerCamelCase ( self ,_A ,_A = -1 ,_A = -1 ,_A = False ,_A = None ,):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_A ,batch_size=_A ,seq_length=_A ,is_pair=_A ,framework=_A )
elif self.task == "causal-lm":
_lowerCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
_A ,batch_size=_A ,seq_length=_A ,is_pair=_A ,framework=_A )
else:
_lowerCAmelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A ,batch_size=_A ,seq_length=_A ,is_pair=_A ,framework=_A )
return common_inputs
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
_lowerCAmelCase : Optional[Any] = super()._flatten_past_key_values_(_A ,_A ,_A ,_A )
else:
_lowerCAmelCase : str = super(_A ,self )._flatten_past_key_values_(
_A ,_A ,_A ,_A )
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
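        # Repeatedly merge the lowest-ranked (most frequent) adjacent symbol
        # pair until no learned BPE merge applies.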
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : _A[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
index += 1
return (vocab_file, merges_file)
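# Illustration (not part of the original file): a minimal, standalone sketch of the
# greedy BPE merge loop that the bpe method above implements: repeatedly merge the
# adjacent pair with the lowest merge rank until no ranked pair remains. The two-rule
# merge table below is hypothetical.
def _demo_bpe(word, merge_ranks):
    symbols = list(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        # pick the lowest-ranked pair, mirroring the min(...) call in bpe() above
        best = min(pairs, key=lambda pair: merge_ranks.get(pair, float('inf')))
        if best not in merge_ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols
assert _demo_bpe('lower', {('l', 'o'): 0, ('lo', 'w'): 1}) == ['low', 'e', 'r']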
| 16 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = val
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCAmelCase : Any = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
_lowerCAmelCase : Any = value
else:
_lowerCAmelCase : Any = value
return new_state_dict
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCAmelCase : List[str] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase : List[Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[:256, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[:256]
_lowerCAmelCase : int = in_proj_weight[256:512, :]
_lowerCAmelCase : str = in_proj_bias[256:512]
_lowerCAmelCase : str = in_proj_weight[-256:, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowerCAmelCase : str = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_lowerCAmelCase : Optional[int] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Dict = in_proj_weight[:256, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[:256]
_lowerCAmelCase : List[str] = in_proj_weight[256:512, :]
_lowerCAmelCase : int = in_proj_bias[256:512]
_lowerCAmelCase : Any = in_proj_weight[-256:, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowerCAmelCase : List[Any] = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowerCAmelCase : Union[str, Any] = in_proj_weight_cross_attn[:256, :]
_lowerCAmelCase : str = in_proj_bias_cross_attn[:256]
_lowerCAmelCase : Tuple = in_proj_weight_cross_attn[256:512, :]
_lowerCAmelCase : Union[str, Any] = in_proj_bias_cross_attn[256:512]
_lowerCAmelCase : List[str] = in_proj_weight_cross_attn[-256:, :]
_lowerCAmelCase : List[Any] = in_proj_bias_cross_attn[-256:]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
    _lowerCAmelCase, _lowerCAmelCase : str = image.size
_lowerCAmelCase : Optional[Any] = max(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = 800 if 'detection' in checkpoint_url else 1000
_lowerCAmelCase : int = target_max_size / current_max_size
_lowerCAmelCase : List[Any] = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = F.to_tensor(_lowerCamelCase )
_lowerCAmelCase : Dict = F.normalize(_lowerCamelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
logger.info('Converting model...' )
# load original state dict
_lowerCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Any = rename_backbone_keys(_lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCAmelCase : Optional[Any] = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_lowerCAmelCase : List[str] = state_dict.pop(_lowerCamelCase )
_lowerCAmelCase : Tuple = val
# create HuggingFace model and load state dict
_lowerCAmelCase : Union[str, Any] = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
_lowerCAmelCase : int = 15
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Union[str, Any] = {0: 'table', 1: 'table rotated'}
_lowerCAmelCase : Optional[Any] = idalabel
_lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
else:
_lowerCAmelCase : Any = 125
_lowerCAmelCase : Any = 6
_lowerCAmelCase : int = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
_lowerCAmelCase : Any = idalabel
_lowerCAmelCase : str = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : int = DetrImageProcessor(
format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
_lowerCAmelCase : List[Any] = TableTransformerForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# verify our conversion
_lowerCAmelCase : Dict = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
_lowerCAmelCase : Optional[Any] = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=_lowerCamelCase )
_lowerCAmelCase : List[str] = Image.open(_lowerCamelCase ).convert('RGB' )
_lowerCAmelCase : Tuple = normalize(resize(_lowerCamelCase , _lowerCamelCase ) ).unsqueeze(0 )
_lowerCAmelCase : str = model(_lowerCamelCase )
if "detection" in checkpoint_url:
_lowerCAmelCase : List[Any] = (1, 15, 3)
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
_lowerCAmelCase : Optional[int] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
_lowerCAmelCase : Tuple = (1, 125, 7)
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
_lowerCAmelCase : List[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
_lowerCAmelCase : str = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(_lowerCamelCase )
image_processor.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCAmelCase = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
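# Illustration (not part of the original file): the pop-and-reassign pattern that
# rename_key above applies, shown on a plain dict standing in for a checkpoint
# state_dict. The two key pairs are taken from the rename table above.
demo_state_dict = {'transformer.encoder.norm.weight': 1, 'input_proj.bias': 2}
for demo_src, demo_dest in [
    ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
    ('input_proj.bias', 'input_projection.bias'),
]:
    demo_state_dict[demo_dest] = demo_state_dict.pop(demo_src)
assert demo_state_dict == {'encoder.layernorm.weight': 1, 'input_projection.bias': 2}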
| 702 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __UpperCamelCase ( a__ , a__ ):
@register_to_config
def __init__( self ,_A = 128 ,_A = 256 ,_A = 2_0_0_0.0 ,_A = 768 ,_A = 12 ,_A = 12 ,_A = 64 ,_A = 2048 ,_A = 0.1 ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : int = nn.Sequential(
nn.Linear(_A ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_A ) ,nn.SiLU() ,)
_lowerCAmelCase : Any = nn.Embedding(_A ,_A )
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : int = nn.Dropout(p=_A )
_lowerCAmelCase : int = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
_lowerCAmelCase : Any = DecoderLayer(d_model=_A ,d_kv=_A ,num_heads=_A ,d_ff=_A ,dropout_rate=_A )
self.decoders.append(_A )
_lowerCAmelCase : Optional[Any] = TaLayerNorm(_A )
_lowerCAmelCase : List[str] = nn.Dropout(p=_A )
_lowerCAmelCase : Optional[Any] = nn.Linear(_A ,_A ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase : Any = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_lowerCAmelCase : Union[str, Any] = self.conditioning_emb(_A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase : Union[str, Any] = torch.broadcast_to(
torch.arange(_A ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_lowerCAmelCase : Any = self.position_encoding(_A )
_lowerCAmelCase : str = self.continuous_inputs_projection(_A )
inputs += position_encodings
_lowerCAmelCase : int = self.dropout(_A )
# decoder: No padding present.
_lowerCAmelCase : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase : Optional[Any] = [(x, self.encoder_decoder_mask(_A ,_A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase : Dict = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_lowerCAmelCase : Tuple = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase : Tuple = lyr(
_A ,conditioning_emb=_A ,encoder_hidden_states=_A ,encoder_attention_mask=_A ,)[0]
_lowerCAmelCase : Any = self.decoder_norm(_A )
_lowerCAmelCase : List[Any] = self.post_dropout(_A )
_lowerCAmelCase : int = self.spec_out(_A )
return spec_out
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_A ,d_kv=_A ,num_heads=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_A ,d_ff=_A ,dropout_rate=_A ,layer_norm_epsilon=_A ) )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,_A=None ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Any = self.layer[0](
_A ,conditioning_emb=_A ,attention_mask=_A ,)
if encoder_hidden_states is not None:
_lowerCAmelCase : Any = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase : str = self.layer[1](
_A ,key_value_states=_A ,attention_mask=_A ,)
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase : Optional[Any] = self.layer[-1](_A ,_A )
return (hidden_states,)
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = TaLayerNorm(_A )
_lowerCAmelCase : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Dict = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.FiLMLayer(_A ,_A )
# Self-attention block
_lowerCAmelCase : Union[str, Any] = self.attention(_A )
_lowerCAmelCase : Optional[Any] = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = Attention(query_dim=_A ,heads=_A ,dim_head=_A ,out_bias=_A ,scale_qk=_A )
_lowerCAmelCase : Optional[int] = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Tuple = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.layer_norm(_A )
_lowerCAmelCase : str = self.attention(
_A ,encoder_hidden_states=_A ,attention_mask=attention_mask.squeeze(1 ) ,)
_lowerCAmelCase : Any = hidden_states + self.dropout(_A )
return layer_output
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=_A ,d_ff=_A ,dropout_rate=_A )
_lowerCAmelCase : Tuple = TaFiLMLayer(in_features=d_model * 4 ,out_features=_A )
_lowerCAmelCase : Any = TaLayerNorm(_A ,eps=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
def __lowerCamelCase ( self ,_A ,_A=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.layer_norm(_A )
if conditioning_emb is not None:
_lowerCAmelCase : Union[str, Any] = self.film(_A ,_A )
_lowerCAmelCase : str = self.DenseReluDense(_A )
_lowerCAmelCase : Tuple = hidden_states + self.dropout(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Any = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Linear(_A ,_A ,bias=_A )
_lowerCAmelCase : Union[str, Any] = nn.Dropout(_A )
_lowerCAmelCase : int = NewGELUActivation()
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.act(self.wi_a(_A ) )
_lowerCAmelCase : Optional[int] = self.wi_a(_A )
_lowerCAmelCase : Union[str, Any] = hidden_gelu * hidden_linear
_lowerCAmelCase : Dict = self.dropout(_A )
_lowerCAmelCase : Dict = self.wo(_A )
return hidden_states
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A=1E-6 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Parameter(torch.ones(_A ) )
_lowerCAmelCase : Optional[int] = eps
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_A )
_lowerCAmelCase : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __UpperCamelCase ( nn.Module ):
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(_A ,3.0 )) ))
class __UpperCamelCase ( nn.Module ):
def __init__( self ,_A ,_A ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[str] = nn.Linear(_A ,out_features * 2 ,bias=_A )
def __lowerCamelCase ( self ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.scale_bias(_A )
_lowerCAmelCase, _lowerCAmelCase : List[Any] = torch.chunk(_A ,2 ,-1 )
_lowerCAmelCase : List[Any] = x * (1 + scale) + shift
return x
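# Illustration (not part of the original file): the FiLM modulation computed by the
# final layer above: a conditioning embedding is projected to (scale, shift) halves
# and applied as x * (1 + scale) + shift. The sizes below are made up for the sketch.
import torch
from torch import nn
_film_d_model, _film_cond_dim = 8, 32
_film_proj = nn.Linear(_film_cond_dim, _film_d_model * 2, bias=False)
_film_x = torch.randn(2, 5, _film_d_model)      # (batch, seq, d_model)
_film_cond = torch.randn(2, 1, _film_cond_dim)  # broadcasts over the sequence
_film_scale, _film_shift = torch.chunk(_film_proj(_film_cond), 2, dim=-1)
assert (_film_x * (1 + _film_scale) + _film_shift).shape == _film_x.shape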
| 16 | 0 |
"""simple docstring"""
import os
def lowerCamelCase__ ( ):
'''simple docstring'''
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
_lowerCAmelCase : List[str] = str(file.readlines()[0] )
_lowerCAmelCase : Optional[Any] = names.replace('"' , '' ).split(',' )
names.sort()
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Union[str, Any] = 0
for i, name in enumerate(_lowerCamelCase ):
for letter in name:
name_score += ord(_lowerCamelCase ) - 64
total_score += (i + 1) * name_score
_lowerCAmelCase : Dict = 0
return total_score
if __name__ == "__main__":
print(solution())
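# Illustration (not part of the original file): the scoring rule above on a tiny
# hand-made list. 'COLIN' has letter sum 3 + 15 + 12 + 9 + 14 = 53 and sits at sorted
# position 2, so its score is 2 * 53 = 106.
_demo_names = sorted(['ABE', 'COLIN', 'ZOE'])
_demo_scores = [(i + 1) * sum(ord(c) - 64 for c in n) for i, n in enumerate(_demo_names)]
assert _demo_names[1] == 'COLIN' and _demo_scores[1] == 106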
| 703 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self ,_A ,_A=3 ,_A=32 ,_A=3 ,_A=10 ,_A=[10, 20, 30, 40] ,_A=[1, 1, 2, 1] ,_A=True ,_A=True ,_A="relu" ,_A=3 ,_A=None ,):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : int = image_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = embeddings_size
_lowerCAmelCase : Optional[int] = hidden_sizes
_lowerCAmelCase : str = depths
_lowerCAmelCase : str = is_training
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[int] = num_labels
_lowerCAmelCase : Dict = scope
_lowerCAmelCase : Union[str, Any] = len(_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = None
if self.use_labels:
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_lowerCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TFResNetModel(config=_A )
_lowerCAmelCase : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowerCamelCase ( self ,_A ,_A ,_A ):
'''simple docstring'''
_lowerCAmelCase : Any = self.num_labels
_lowerCAmelCase : Dict = TFResNetForImageClassification(_A )
_lowerCAmelCase : int = model(_A ,labels=_A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
_UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFResNetModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self ,config_class=_A ,has_text_modality=_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(_A ,_A ,_A ):
_lowerCAmelCase : int = model_class(_A )
_lowerCAmelCase : int = model(**self._prepare_for_class(_A ,_A ) )
_lowerCAmelCase : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : int = self.model_tester.num_stages
self.assertEqual(len(_A ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowerCAmelCase, _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase : Optional[int] = layer_type
_lowerCAmelCase : Tuple = True
check_hidden_states_output(_A ,_A ,_A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
check_hidden_states_output(_A ,_A ,_A )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : int = image_processor(images=_A ,return_tensors='tf' )
# forward pass
_lowerCAmelCase : int = model(**_A )
# verify the logits
_lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_A )
_lowerCAmelCase : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_A ,atol=1E-4 ) )
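# Illustration (not part of the original file): the arithmetic behind the
# image_size // 4 expectation in check_hidden_states_output above. ResNet's stride-2
# stem convolution followed by a stride-2 pooling step divides each spatial side by 4.
_demo_image_size = 32
_after_stem = _demo_image_size // 2  # stride-2 stem convolution
_after_pool = _after_stem // 2       # stride-2 pooling
assert _after_pool == _demo_image_size // 4 == 8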
| 16 | 0 |
import math
from collections.abc import Callable
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : float = xa
_lowerCAmelCase : float = xa
while True:
if x_n == x_na or function(_lowerCamelCase ) == function(_lowerCamelCase ):
raise ZeroDivisionError('float division by zero, could not find root' )
_lowerCAmelCase : float = x_na - (
function(_lowerCamelCase ) / ((function(_lowerCamelCase ) - function(_lowerCamelCase )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
_lowerCAmelCase : Tuple = x_na
_lowerCAmelCase : int = x_na
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return math.pow(_lowerCamelCase , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
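# Illustration (not part of the original file): the same secant update applied to
# g(x) = x**2 - 2, whose positive root is sqrt(2) ~ 1.41421. Written self-contained so
# the sketch runs on its own.
def _demo_secant(g, x_prev, x_curr, tol=1e-5):
    while True:
        x_next = x_curr - g(x_curr) / ((g(x_curr) - g(x_prev)) / (x_curr - x_prev))
        if abs(x_next - x_curr) < tol:
            return x_next
        x_prev, x_curr = x_curr, x_next
assert abs(_demo_secant(lambda x: x * x - 2, 1.0, 2.0) - 2 ** 0.5) < 1e-4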
| 704 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
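# Illustration (not part of the original file): the "first incorrect term" idea that
# solution() sums above, shown on u(n) = n**3. Fitting only u(1) gives the constant
# polynomial OP(1, n) = 1, whose first wrong prediction is at n = 2, where it still
# returns 1 while u(2) = 8.
_demo_u = lambda n: n ** 3
_demo_constant_fit = lambda n: _demo_u(1)  # degree-0 interpolation through (1, u(1))
_demo_first_wrong = next(n for n in range(1, 10) if _demo_constant_fit(n) != _demo_u(n))
assert _demo_first_wrong == 2 and _demo_constant_fit(_demo_first_wrong) == 1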
| 16 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=_A ,)
assert hasattr(self ,'env' )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = {
'enabled': True,
'processes_per_host': 8,
}
_lowerCAmelCase : Dict = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
_lowerCAmelCase : List[str] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
_lowerCAmelCase : Dict = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=_A ,instance_type=self.instance_type ,debugger_hook_config=_A ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=_A ,py_version='py36' ,)
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
_lowerCAmelCase : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCAmelCase : str = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCAmelCase : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,_A )
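# Illustration (not part of the original file): the threshold assertions at the end of
# the test above, exercised on hand-made numbers instead of a real SageMaker job.
_demo_results = {'train_runtime': 1600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}
_demo_runtime, _demo_accuracy, _demo_loss = 1500, [0.31, 0.35], [1.1, 0.9]
assert _demo_runtime <= _demo_results['train_runtime']
assert all(acc >= _demo_results['eval_accuracy'] for acc in _demo_accuracy)
assert all(loss <= _demo_results['eval_loss'] for loss in _demo_loss)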
| 705 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
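# Illustration (not part of the original file): the CJK code-point range test that the
# first function above implements, checked on two sample characters. U+4E2D ('中')
# falls inside the 0x4E00-0x9FFF block, while an ASCII letter does not.
assert 0x4E00 <= ord('中') <= 0x9FFF
assert not (0x4E00 <= ord('a') <= 0x9FFF)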
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_lowerCAmelCase = list[list[float | int]]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = matrix[row][col]
_lowerCAmelCase : Tuple = vector[row][0]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : Any = 0
while row < size and col < size:
# pivoting
_lowerCAmelCase : Optional[int] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase , _lowerCamelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
            _lowerCAmelCase, _lowerCAmelCase : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _lowerCamelCase ):
_lowerCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : int = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_lowerCamelCase )
]
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Optional[int] = y_val
_lowerCAmelCase : List[Any] = solve(_lowerCamelCase , _lowerCamelCase )
def interpolated_func(_lowerCamelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase__ ( _lowerCamelCase = question_function , _lowerCamelCase = 10 ):
'''simple docstring'''
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 , order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Any = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
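# Illustration (not part of the original file): what the Gaussian-elimination solver
# above computes for a tiny 2x2 system: x + y = 3 and x - y = 1 give x = 2, y = 1,
# verified here by substituting the solution back into the equations.
_demo_matrix, _demo_vector, _demo_solution = [[1, 1], [1, -1]], [[3], [1]], [[2.0], [1.0]]
for _demo_row in range(2):
    _demo_lhs = sum(_demo_matrix[_demo_row][_c] * _demo_solution[_c][0] for _c in range(2))
    assert _demo_lhs == _demo_vector[_demo_row][0]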
| 706 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , unittest.TestCase ):
_UpperCAmelCase = LDMTextToImagePipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=_A ,set_alpha_to_one=_A ,)
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=(32, 64) ,in_channels=3 ,out_channels=3 ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,latent_channels=4 ,)
torch.manual_seed(0 )
_lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_lowerCAmelCase : Tuple = CLIPTextModel(_A )
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowerCamelCase ( self ,_A ,_A=0 ):
'''simple docstring'''
if str(_A ).startswith('mps' ):
_lowerCAmelCase : int = torch.manual_seed(_A )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
_lowerCAmelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : str = LDMTextToImagePipeline(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(_A )
_lowerCAmelCase : Any = pipe(**_A ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
_lowerCAmelCase : Tuple = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = torch.manual_seed(_A )
_lowerCAmelCase : Union[str, Any] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : Optional[Any] = self.get_inputs(_A )
_lowerCAmelCase : List[Any] = pipe(**_A ).images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase : str = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
_lowerCAmelCase : Dict = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ,_A ,_A=torch.floataa ,_A=0 ):
'''simple docstring'''
_lowerCAmelCase : List[str] = torch.manual_seed(_A )
_lowerCAmelCase : Optional[int] = np.random.RandomState(_A ).standard_normal((1, 4, 32, 32) )
_lowerCAmelCase : List[Any] = torch.from_numpy(_A ).to(device=_A ,dtype=_A )
_lowerCAmelCase : int = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_A )
pipe.set_progress_bar_config(disable=_A )
_lowerCAmelCase : str = self.get_inputs(_A )
_lowerCAmelCase : Union[str, Any] = pipe(**_A ).images[0]
_lowerCAmelCase : int = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
_lowerCAmelCase : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
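# Illustration (not part of the original file): the corner-slice comparison pattern the
# tests above rely on: take a 3x3 patch of the last channel and compare it elementwise
# against a stored reference within an absolute tolerance.
import numpy as np
_demo_image = np.ones((1, 16, 16, 3), dtype=np.float32)
_demo_image_slice = _demo_image[0, -3:, -3:, -1]
_demo_expected_slice = np.ones(9, dtype=np.float32)
assert np.abs(_demo_image_slice.flatten() - _demo_expected_slice).max() < 1e-3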
| 16 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
_lowerCAmelCase = """2020.9.26"""
_lowerCAmelCase = """xcodz-dot, cclaus, dhruvmanila"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not all(isinstance(_lowerCamelCase , (float, int) ) for val in locals().values() ):
_lowerCAmelCase : Any = f"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(_lowerCamelCase )
_lowerCAmelCase : Any = ((x * distance) / (z + distance)) * scale
_lowerCAmelCase : Dict = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate the point (x, y, z) about the given axis ('x', 'y' or 'z') by `angle`."""
    if not isinstance(axis, str):
        raise TypeError('Axis must be a str')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            'Input values except axis must either be float or int: '
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Angle conversion kept as in the original algorithm (intentionally not a
    # plain degrees-to-radians mapping).
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
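# Each branch above applies the planar rotation matrix
#     [cos(a)  -sin(a)]
#     [sin(a)   cos(a)]
# to the two coordinates perpendicular to the chosen axis, while the
# coordinate along that axis passes through unchanged.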
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
print(F'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
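# A short sketch (not part of the original module) chaining the two helpers:
# rotate a point about the z-axis, then project the result onto the screen
# plane. The scale and distance values are arbitrary illustrations.
#
#     rx, ry, rz = rotate(1.0, 2.0, 3.0, "z", 45.0)
#     sx, sy = convert_to_2d(rx, ry, rz, 10.0, 10.0)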
| 707 |
"""simple docstring"""
import baseaa
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaaencode(string.encode('utf-8' ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return baseaa.aaadecode(_lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
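# Round-trip sketch: the two helpers are exact inverses for any UTF-8 string,
# e.g. base85_decode(base85_encode("hello")) == "hello".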
| 16 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]
    def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048],
                 depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None,
                 num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32,
                 width_factor=1, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # Stage names drive the backbone feature-map bookkeeping below.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
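# Minimal usage sketch (argument values are illustrative, not defaults):
#
#     config = BitConfig(depths=[2, 2], hidden_sizes=[128, 256], out_features=["stage2"])
#     assert config.stage_names == ["stem", "stage1", "stage2"]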
| 708 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """bert-base-uncased""": 512,
    """bert-large-uncased""": 512,
    """bert-base-cased""": 512,
    """bert-large-cased""": 512,
    """bert-base-multilingual-uncased""": 512,
    """bert-base-multilingual-cased""": 512,
    """bert-base-chinese""": 512,
    """bert-base-german-cased""": 512,
    """bert-large-uncased-whole-word-masking""": 512,
    """bert-large-cased-whole-word-masking""": 512,
    """bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
    """bert-large-cased-whole-word-masking-finetuned-squad""": 512,
    """bert-base-cased-finetuned-mrpc""": 512,
    """bert-base-german-dbmdz-cased""": 512,
    """bert-base-german-dbmdz-uncased""": 512,
    """TurkuNLP/bert-base-finnish-cased-v1""": 512,
    """TurkuNLP/bert-base-finnish-uncased-v1""": 512,
    """wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" BERT tokenizer backed by HuggingFace's `tokenizers` library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # Rebuild the backend normalizer when its serialized state disagrees
        # with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] special tokens around one sequence or a pair of sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
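    # For a single sequence the method above yields `[CLS] A [SEP]`;
    # for a pair of sequences it yields `[CLS] A [SEP] B [SEP]`.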
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create the token type ids (segment ids) for a sequence or a sequence pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
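    # The resulting mask marks the first sequence, its [CLS] and its [SEP]
    # with 0s and the second sequence plus its [SEP] with 1s:
    #     0 0 0 0 0 0 ... 0 1 1 1 ... 1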
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
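# Usage sketch (any checkpoint name from the maps above works the same way;
# the ids shown are what bert-base-uncased is expected to produce):
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     tokenizer("Hello world")["input_ids"]  # [101, 7592, 2088, 102]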
| 16 | 0 |