| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 81–54k) | int64 (0–721) | string (length 91–41.9k) | int64 (0–699) | int64 (0–1) |
"""simple docstring"""
def _snake_case ( snake_case__ : int ):
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError('only integers accepted as input' )
else:
A = str(abs(snake_case__ ) )
A = [list(snake_case__ ) for char in range(len(snake_case__ ) )]
for index in range(len(snake_case__ ) ):
num_transpositions[index].pop(snake_case__ )
return max(
int(''.join(list(snake_case__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod() | 22 |
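# Added illustration (not part of the original file): an equivalent variant of
# remove_digit built on string slicing, plus two hand-checked cases.
def remove_digit_by_slicing(num: int) -> int:
    s = str(abs(num))
    return max(int(s[:i] + s[i + 1:]) for i in range(len(s)))
assert remove_digit_by_slicing(152) == 52     # dropping the '1' wins
assert remove_digit_by_slicing(-2736) == 736  # the sign is discarded via abs()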
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    '''simple docstring'''
    sample: torch.FloatTensor
class Encoder(nn.Module):
    '''simple docstring'''
    def __init__( self ,in_channels=3 ,out_channels=3 ,down_block_types=("DownEncoderBlock2D",) ,block_out_channels=(64,) ,layers_per_block=2 ,norm_num_groups=32 ,act_fn="silu" ,double_z=True ,):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(
            in_channels ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
        self.mid_block = None
        self.down_blocks = nn.ModuleList([] )
        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type ,num_layers=self.layers_per_block ,in_channels=input_channel ,out_channels=output_channel ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=act_fn ,resnet_groups=norm_num_groups ,attention_head_dim=output_channel ,temb_channels=None ,)
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=act_fn ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=norm_num_groups ,temb_channels=None ,)
        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=norm_num_groups ,eps=1e-6 )
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1] ,conv_out_channels ,3 ,padding=1 )
        self.gradient_checkpointing = False
    def forward( self ,x ):
        sample = x
        sample = self.conv_in(sample )
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
            # down
            if is_torch_version('>=' ,'1.11.0' ):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block ) ,sample ,use_reentrant=False )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) ,sample ,use_reentrant=False )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block ) ,sample )
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,sample )
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample )
            # middle
            sample = self.mid_block(sample )
        # post-process
        sample = self.conv_norm_out(sample )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
class Decoder(nn.Module):
    '''simple docstring'''
    def __init__( self ,in_channels=3 ,out_channels=3 ,up_block_types=("UpDecoderBlock2D",) ,block_out_channels=(64,) ,layers_per_block=2 ,norm_num_groups=32 ,act_fn="silu" ,norm_type="group" ,):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(
            in_channels ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        temb_channels = in_channels if norm_type == 'spatial' else None
        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=act_fn ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=norm_num_groups ,temb_channels=temb_channels ,)
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type ,num_layers=self.layers_per_block + 1 ,in_channels=prev_output_channel ,out_channels=output_channel ,prev_output_channel=None ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=act_fn ,resnet_groups=norm_num_groups ,attention_head_dim=output_channel ,temb_channels=temb_channels ,resnet_time_scale_shift=norm_type ,)
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0] ,temb_channels )
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=norm_num_groups ,eps=1e-6 )
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0] ,out_channels ,3 ,padding=1 )
        self.gradient_checkpointing = False
    def forward( self ,z ,latent_embeds=None ):
        sample = z
        sample = self.conv_in(sample )
        upscale_dtype = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(module ):
                def custom_forward(*inputs ):
                    return module(*inputs )
                return custom_forward
            if is_torch_version('>=' ,'1.11.0' ):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) ,sample ,latent_embeds ,use_reentrant=False )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block ) ,sample ,latent_embeds ,use_reentrant=False )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) ,sample ,latent_embeds )
                sample = sample.to(upscale_dtype )
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block ) ,sample ,latent_embeds )
        else:
            # middle
            sample = self.mid_block(sample ,latent_embeds )
            sample = sample.to(upscale_dtype )
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample ,latent_embeds )
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample )
        else:
            sample = self.conv_norm_out(sample ,latent_embeds )
        sample = self.conv_act(sample )
        sample = self.conv_out(sample )
        return sample
class VectorQuantizer(nn.Module):
    '''simple docstring'''
    def __init__( self ,n_e ,vq_embed_dim ,beta ,remap=None ,unknown_index="random" ,sane_index_shape=False ,legacy=True ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy
        self.embedding = nn.Embedding(self.n_e ,self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
        self.remap = remap
        if self.remap is not None:
            self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                F'Remapping {self.n_e} indices to {self.re_embed} indices. '
                F'Using {self.unknown_index} for unknown indices.' )
        else:
            self.re_embed = n_e
        self.sane_index_shape = sane_index_shape
    def remap_to_used( self ,inds ):
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] ,-1 )
        used = self.used.to(inds )
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1 )
        unknown = match.sum(2 ) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape )
    def unmap_to_all( self ,inds ):
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] ,-1 )
        used = self.used.to(inds )
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,inds )
        return back.reshape(ishape )
    def forward( self ,z ):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0 ,2 ,3 ,1 ).contiguous()
        z_flattened = z.view(-1 ,self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened ,self.embedding.weight ) ,dim=1 )
        z_q = self.embedding(min_encoding_indices ).view(z.shape )
        perplexity = None
        min_encodings = None
        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            loss = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        z_q = z + (z_q - z).detach()
        # reshape back to match original input shape
        z_q = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0] ,-1 )  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices )
            min_encoding_indices = min_encoding_indices.reshape(-1 ,1 )  # flatten
        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry( self ,indices ,shape ):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0] ,-1 )  # add batch axis
            indices = self.unmap_to_all(indices )
            indices = indices.reshape(-1 )  # flatten again
        # get quantized latent vectors
        z_q = self.embedding(indices )
        if shape is not None:
            z_q = z_q.view(shape )
            # reshape back to match original input shape
            z_q = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
        return z_q
class DiagonalGaussianDistribution(object):
    '''simple docstring'''
    def __init__( self ,parameters ,deterministic=False ):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters ,2 ,dim=1 )
        self.logvar = torch.clamp(self.logvar ,-30.0 ,20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
    def sample( self ,generator: Optional[torch.Generator] = None ) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape ,generator=generator ,device=self.parameters.device ,dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x
    def kl( self ,other=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
    def nll( self ,sample ,dims=[1, 2, 3] ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=dims )
    def mode( self ):
        return self.mean
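# Added illustration (made-up shapes, not the diffusers API): the nearest-
# codebook lookup and straight-through gradient trick used by VectorQuantizer.
import torch
import torch.nn as nn
codebook = nn.Embedding(16, 4)                    # n_e=16 codes of dimension 4
z = torch.randn(2, 4, 8, 8, requires_grad=True)   # (batch, channel, height, width)
z_flat = z.permute(0, 2, 3, 1).reshape(-1, 4)     # flatten spatial positions
indices = torch.cdist(z_flat, codebook.weight).argmin(dim=1)
z_q = codebook(indices).view(2, 8, 8, 4).permute(0, 3, 1, 2)
z_q = z + (z_q - z).detach()  # forward uses z_q; gradients flow straight to z
print(z_q.shape, indices.shape)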
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
'''simple docstring'''
    def __init__( self ):
        self.initialized = False
    def create_rag_retriever( self ,config ,question_encoder_tokenizer ,generator_tokenizer ,index ):
        if not self.initialized:
            self.retriever = RagRetriever(
                config ,question_encoder_tokenizer=question_encoder_tokenizer ,generator_tokenizer=generator_tokenizer ,index=index ,init_retrieval=False ,)
            self.initialized = True
    def init_retrieval( self ):
        self.retriever.index.init_index()
    def retrieve( self ,question_hidden_states ,n_docs ):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states ,n_docs )
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
'''simple docstring'''
    def __init__( self ,config ,question_encoder_tokenizer ,generator_tokenizer ,retrieval_workers ,index=None ):
        if index is not None and index.is_initialized() and len(retrieval_workers ) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config ,question_encoder_tokenizer=question_encoder_tokenizer ,generator_tokenizer=generator_tokenizer ,index=index ,init_retrieval=False ,)
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers ) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config ,question_encoder_tokenizer ,generator_tokenizer ,index )
                    for worker in self.retrieval_workers
                ] )
    def init_retrieval( self ):
        logger.info('initializing retrieval' )
        if len(self.retrieval_workers ) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve( self ,question_hidden_states ,n_docs ):
        if len(self.retrieval_workers ) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0 ,len(self.retrieval_workers ) - 1 )]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states ,n_docs ) )
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states ,n_docs )
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
    @classmethod
    def get_tokenizers( cls ,retriever_name_or_path ,indexed_dataset=None ,**kwargs ):
        return super(RagRayDistributedRetriever ,cls ).get_tokenizers(retriever_name_or_path ,indexed_dataset ,**kwargs )
    @classmethod
    def from_pretrained( cls ,retriever_name_or_path ,actor_handles ,indexed_dataset=None ,**kwargs ):
        config = kwargs.pop('config' ,None ) or RagConfig.from_pretrained(retriever_name_or_path ,**kwargs )
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path ,config=config )
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size ,indexed_dataset )
        else:
            index = cls._build_index(config )
        return cls(
            config ,question_encoder_tokenizer=question_encoder_tokenizer ,generator_tokenizer=generator_tokenizer ,retrieval_workers=actor_handles ,index=index ,)
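# Added illustration (hypothetical worker class, not the RAG API): the Ray actor
# pattern the retriever builds on — several remote workers, one picked at random
# per request.
import random
import ray
@ray.remote
class EchoWorker:
    def retrieve(self, query):
        return f"result for {query!r}"
if __name__ == "__main__":
    ray.init()
    workers = [EchoWorker.remote() for _ in range(3)]
    worker = workers[random.randint(0, len(workers) - 1)]
    print(ray.get(worker.retrieve.remote("what is RAG?")))
    ray.shutdown()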
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 1 |
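# Added check: data sampled from f(x) = x**2 is fitted exactly, so evaluating
# the interpolant at x0 = 5 reproduces 25.
value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)
print(value)  # 25.0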
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''albert'''
    def __init__( self ,vocab_size=3_0000 ,embedding_size=128 ,hidden_size=4096 ,num_hidden_layers=12 ,num_hidden_groups=1 ,num_attention_heads=64 ,intermediate_size=1_6384 ,inner_group_num=1 ,hidden_act="gelu_new" ,hidden_dropout_prob=0 ,attention_probs_dropout_prob=0 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,classifier_dropout_prob=0.1 ,position_embedding_type="absolute" ,pad_token_id=0 ,bos_token_id=2 ,eos_token_id=3 ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
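# Added usage sketch (assumes the transformers package is installed): build a
# deliberately tiny ALBERT configuration and count the resulting parameters.
from transformers import AlbertConfig, AlbertModel
config = AlbertConfig(embedding_size=64, hidden_size=128, num_hidden_layers=2,
                      num_attention_heads=4, intermediate_size=256)
model = AlbertModel(config)
print(sum(p.numel() for p in model.parameters()))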
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
'''simple docstring'''
    def __init__( self ,parent ,batch_size=2 ,is_training=True ,use_auxiliary_loss=False ,num_queries=10 ,num_channels=3 ,min_size=32 * 8 ,max_size=32 * 8 ,num_labels=4 ,hidden_dim=64 ,):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) ,device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim ,)
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self ,output ,config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) ,len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) ,len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) ,config.decoder_layers )
    def create_and_check_mask2former_model( self ,config ,pixel_values ,pixel_mask ,output_hidden_states=False ):
        with torch.no_grad():
            model = Mask2FormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values ,pixel_mask=pixel_mask )
            output = model(pixel_values ,output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output ,config )
    def create_and_check_mask2former_instance_segmentation_head_model( self ,config ,pixel_values ,pixel_mask ,mask_labels ,class_labels ):
        model = Mask2FormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values ,pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
        result = model(
            pixel_values=pixel_values ,pixel_mask=pixel_mask ,mask_labels=mask_labels ,class_labels=class_labels )
        comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class Mask2FormerModelTest(ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': Mask2FormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
    def setUp( self ):
        self.model_tester = Mask2FormerModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=Mask2FormerConfig ,has_text_modality=False )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_mask2former_model( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config ,**inputs_dict ,output_hidden_states=False )
    def test_mask2former_instance_segmentation_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
@slow
    def test_model_from_pretrained( self ):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_model_with_labels( self ):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size) ,device=torch_device ),
            'mask_labels': torch.randn((2, 10, *size) ,device=torch_device ),
            'class_labels': torch.zeros(2 ,10 ,device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config ,**inputs_dict ,output_hidden_states=True )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict ,output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values ,mask_labels=mask_labels ,class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values ,mask_labels=mask_labels ,class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def model_checkpoints( self ):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor( self ):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def test_inference_no_head( self ):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image ,return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape ,(1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] ,expected_slice_hidden_state ,atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,expected_slice_hidden_state ,atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,expected_slice_hidden_state ,atol=TOLERANCE ) )
    def test_inference_universal_segmentation_head( self ):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image ,return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(inputs_shape ,(1, 3, 384, 384) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.78_39, -9.00_56, -8.81_21],
            [-7.41_04, -7.03_13, -6.54_01],
            [-6.61_05, -6.34_27, -6.46_75],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,expected_slice ,atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.83_24, -8.08_35, -4.19_22],
                [0.84_50, -9.00_50, -3.60_53],
                [0.30_45, -7.72_93, -3.02_75],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,expected_slice ,atol=TOLERANCE ) )
    def test_with_segmentation_maps_and_loss( self ):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] ,return_tensors='pt' ,)
        inputs['pixel_values'] = inputs['pixel_values'].to(torch_device )
        inputs['mask_labels'] = [el.to(torch_device ) for el in inputs['mask_labels']]
        inputs['class_labels'] = [el.to(torch_device ) for el in inputs['class_labels']]
        with torch.no_grad():
            outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
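# Added illustration (plain PyTorch, not part of the original tests): the
# retain_grad idiom used by test_retain_grad_hidden_states_attentions above.
import torch
x = torch.randn(4, 8, requires_grad=True)
hidden = torch.relu(x @ torch.randn(8, 8))
hidden.retain_grad()      # non-leaf tensors drop .grad unless explicitly retained
hidden.sum().backward()
print(hidden.grad.shape)  # torch.Size([4, 8])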
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
        ),
        '''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''squeezebert/squeezebert-uncased''': (
            '''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
        ),
        '''squeezebert/squeezebert-mnli-headless''': (
            '''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''squeezebert/squeezebert-uncased''': 512,
    '''squeezebert/squeezebert-mnli''': 512,
    '''squeezebert/squeezebert-mnli-headless''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    '''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
    '''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,A_ ) != do_lower_case
or normalizer_state.get('strip_accents' ,A_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,A_ ) != tokenize_chinese_chars
):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
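# Added illustration (hypothetical token ids, no tokenizer needed): the
# sentence-pair token_type_ids rule implemented above.
cls_id, sep_id = 101, 102
ids_a, ids_b = [7, 8, 9], [4, 5]
segment_ids = len([cls_id] + ids_a + [sep_id]) * [0] + len(ids_b + [sep_id]) * [1]
print(segment_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]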
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
'''simple docstring'''
    def __init__( self ,parent ,):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = EsmConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs ,encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids ,attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=EsmConfig ,hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name ,dict )
                for k, v in name.items():
                    assert isinstance(v ,tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) ,expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.92_15_18, -10.58_98_14, -6.4_67_13_07],
                    [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
                    [-7.78_12_47, -13.95_15_57, -3.74_05_92],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
    def test_inference_no_head( self ):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
                    [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
                    [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
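# Added illustration (made-up numbers): the slice-comparison idiom the two tests
# above rely on — pin a tiny corner of the output and compare with a tolerance.
import numpy as np
import tensorflow as tf
output = tf.constant([[[0.1, 0.2], [0.3, 0.4]]])
expected_slice = np.array([[[0.1, 0.2], [0.3, 0.4]]])
assert np.allclose(output[:, :2, :2].numpy(), expected_slice, atol=1e-4)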
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = '''main'''
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = '''aaaaaaa'''
# This commit does not exist, so we should 404.

PINNED_SHA1 = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    print('Welcome!' )
    yield
    print('Bye!' )
@contextlib.contextmanager
def context_fr():
    print('Bonjour!' )
    yield
    print('Au revoir!' )
class TestImportMechanisms(unittest.TestCase ):
    '''simple docstring'''
    def test_module_spec_available( self ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class GenericUtilTests(unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
    def test_context_managers_no_context( self ,mock_stdout ):
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() ,'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
    def test_context_managers_one_context( self ,mock_stdout ):
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' ,new_callable=io.StringIO )
    def test_context_managers_two_context( self ,mock_stdout ):
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() ,'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
    def test_find_labels_pt( self ):
        self.assertEqual(find_labels(BertForSequenceClassification ) ,['labels'] )
        self.assertEqual(find_labels(BertForPreTraining ) ,['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) ,['start_positions', 'end_positions'] )

        class DummyModel(BertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) ,['labels'] )
    @require_tf
    def test_find_labels_tf( self ):
        self.assertEqual(find_labels(TFBertForSequenceClassification ) ,['labels'] )
        self.assertEqual(find_labels(TFBertForPreTraining ) ,['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) ,['start_positions', 'end_positions'] )

        class DummyModel(TFBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) ,['labels'] )
    @require_flax
    def test_find_labels_flax( self ):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) ,[] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) ,[] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) ,[] )

        class DummyModel(FlaxBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) ,[] )
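# Added sketch (not the library's exact implementation): find_labels essentially
# inspects the model's forward signature for label-like parameter names.
import inspect
def find_label_args(forward_fn):
    params = inspect.signature(forward_fn).parameters
    return [p for p in params if "label" in p or p in ("start_positions", "end_positions")]
def fake_forward(input_ids=None, labels=None, start_positions=None):
    ...
print(find_label_args(fake_forward))  # ['labels', 'start_positions']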
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
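# Added quick illustration (hypothetical file list): only .py files under the
# listed top-level dirs survive the filter above.
import re
regex = re.compile(r"^(utils|src|tests).*?\.py$")
files = ["src/models/bert.py", "docs/index.md", "tests/test_bert.py", "setup.py"]
print([f for f in files if regex.match(f)])  # ['src/models/bert.py', 'tests/test_bert.py']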
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase ):
'''simple docstring'''
    def __init__( self ,parent ,batch_size=7 ,num_channels=3 ,image_size=18 ,min_resolution=30 ,max_resolution=400 ,do_resize=True ,size=None ,do_normalize=True ,):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin ,unittest.TestCase ):
'''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing ,'clusters' ) )
        self.assertTrue(hasattr(image_processing ,'do_resize' ) )
        self.assertTrue(hasattr(image_processing ,'size' ) )
        self.assertTrue(hasattr(image_processing ,'do_normalize' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
        self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
    def test_image_processor_to_json_string( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value ,obj[key] ) )
            else:
                self.assertEqual(obj[key] ,value )
    def test_image_processor_to_json_file( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname ,'image_processor.json' )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value ,image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] ,value )
    def test_image_processor_from_and_save_pretrained( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value ,image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] ,value )
@unittest.skip('ImageGPT requires clusters at initialization' )
    def test_init_without_params( self ):
pass
def prepare_images():
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
    image1 = Image.open(dataset[4]['file'] )
    image2 = Image.open(dataset[5]['file'] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_image( self ):
        image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] ,return_tensors='pt' )
        self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() ,expected_ids )
        # test batched
        encoding = image_processing(images ,return_tensors='pt' )
        self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() ,expected_ids )
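# Added illustration (toy two-color palette, not the real ImageGPT clusters):
# each pixel is mapped to the index of its nearest cluster centre, which is
# the color-quantization step the image processor performs.
import numpy as np
clusters = np.asarray([[0.9, 0.7, 0.4], [-0.6, -0.02, 0.5]])
pixels = np.random.uniform(-1, 1, size=(5, 3))  # five normalized RGB pixels
dists = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
print(dists.argmin(axis=1))  # one cluster id per pixel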
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''
    def __init__( self ):
        self.node_position = []
    def get_position( self ,vertex ):
        return self.node_position[vertex]
    def set_position( self ,vertex ,pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self ,heap ,start ,size ,positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] ,self.get_position(positions[start] ) )
                self.set_position(positions[start] ,temp )
                self.top_to_bottom(heap ,smallest_child ,size ,positions )
    def bottom_to_top( self ,val ,index ,heap ,position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] ,index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp ,index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp ,0 )
    def heapify( self ,heap ,positions ):
        start = len(heap ) // 2 - 1
        for i in range(start ,-1 ,-1 ):
            self.top_to_bottom(heap ,i ,len(heap ) ,positions )
    def delete_minimum( self ,heap ,positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap ,0 ,len(heap ) ,positions )
        return temp
def prisms_algorithm(adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('''Enter number of edges: ''').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
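# Added usage sketch (hypothetical weights): the same [vertex, weight] adjacency
# format fed to prisms_algorithm directly, no stdin required.
example_graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
    example_graph[u].append([v, w])
    example_graph[v].append([u, w])
print(prisms_algorithm(example_graph))  # expected MST edges: [(0, 1), (1, 2), (2, 3)]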
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    '''simple docstring'''
    def __init__( self ):
        self.graph = {}
    def add_pair( self ,u ,v ,w=1 ):
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
    def all_nodes( self ):
        return list(self.graph )
    def remove_pair( self ,u ,v ):
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs( self ,s=-2 ,d=-1 ):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(node[1] )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph_randomly( self ,c=-1 ):
        if c == -1:
            c = floor(random() * 1_0000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i ,n ,1 )
    def bfs( self ,s=-2 ):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree( self ,u ):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree( self ,u ):
        return len(self.graph[u] )
    def topological_sort( self ,s=-2 ):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if se have reached the starting point
            if len(stack ) == 0:
                return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
# check if the u exists
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase = True
except ImportError:
_lowercase = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( snake_case__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ ) | 22 | 1 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(_lowercase )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Dict ,**A_ : Tuple ) -> int:
super().__init__(**A_ )
requires_backends(self ,'vision' )
requires_backends(self ,'torch' )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,**A_ : List[Any] ) -> Union[str, Any]:
A = {}
A = {}
A = {}
# preprocess args
if "points_per_batch" in kwargs:
A = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
A = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
A = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
A = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
A = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
A = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
A = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
A = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
A = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
A = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
A = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
A = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : Dict ,A_ : Tuple ,*A_ : Tuple ,A_ : Tuple=None ,A_ : Any=None ,**A_ : Union[str, Any] ) -> Any:
return super().__call__(A_ ,*A_ ,num_workers=A_ ,batch_size=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Optional[int]=64 ,A_ : int = 0 ,A_ : float = 512 / 1500 ,A_ : Optional[int] = 32 ,A_ : Optional[int] = 1 ,) -> Dict:
A = load_image(A_ )
A = self.image_processor.size['longest_edge']
A , A , A , A = self.image_processor.generate_crop_boxes(
A_ ,A_ ,A_ ,A_ ,A_ ,A_ )
A = self.image_processor(images=A_ ,return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
A = self.get_inference_context()
with inference_context():
A = self._ensure_tensor_on_device(A_ ,device=self.device )
A = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
A = image_embeddings
A = grid_points.shape[1]
A = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 ,A_ ,A_ ):
A = grid_points[:, i : i + points_per_batch, :, :]
A = input_labels[:, i : i + points_per_batch]
A = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[Any] ,A_ : Union[str, Any]=0.88 ,A_ : List[Any]=0.95 ,A_ : Union[str, Any]=0 ,A_ : str=1 ,) -> int:
A = model_inputs.pop('input_boxes' )
A = model_inputs.pop('is_last' )
A = model_inputs.pop('original_sizes' ).tolist()
A = model_inputs.pop('reshaped_input_sizes' ).tolist()
A = self.model(**A_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
A = model_outputs['pred_masks']
A = self.image_processor.post_process_masks(
A_ ,A_ ,A_ ,A_ ,binarize=A_ )
A = model_outputs['iou_scores']
A , A , A = self.image_processor.filter_masks(
masks[0] ,iou_scores[0] ,original_sizes[0] ,input_boxes[0] ,A_ ,A_ ,A_ ,A_ ,)
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Any=False ,A_ : str=0.7 ,) -> List[str]:
A = []
A = []
A = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
A = torch.cat(A_ )
A = torch.cat(A_ )
A , A , A , A = self.image_processor.post_process_for_mask_generation(
A_ ,A_ ,A_ ,A_ )
A = defaultdict(A_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(A_ )
A = {}
if output_rle_mask:
A = rle_mask
if output_bboxes_mask:
A = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 22 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str:
A = size if size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ImageGPTImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'clusters' ) )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
A = self.image_processing_class(**self.image_processor_dict )
A = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,obj[key] ) )
else:
self.assertEqual(obj[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,'image_processor.json' )
image_processor_first.to_json_file(A_ )
A = self.image_processing_class.from_json_file(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(A_ )
A = self.image_processing_class.from_pretrained(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
def _snake_case ( ):
A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
A = Image.open(dataset[4]['file'] )
A = Image.open(dataset[5]['file'] )
A = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
A = prepare_images()
# test non-batched
A = image_processing(images[0] ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
A = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ )
# test batched
A = image_processing(A_ ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
A = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ ) | 22 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ):
A = x.device
A = (x * 255).int().clamp(0 , 255 )
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' )
A = ((x & mask) != 0).float()
A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' )
A = bits * 2 - 1
return bits
def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ):
A = x.device
A = (x > 0).int()
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 )
A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
A = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
A = self.alphas_cumprod[timestep]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
A = self._get_variance(snake_case__ , snake_case__ )
A = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu'
A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ )
A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise
A = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 )
else:
A = None
# 1. compute alphas, betas
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[t - 1] if t > 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
A = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A = 0
if t > 0:
A = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device )
A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]:
super().__init__()
A = bit_scale
A = (
ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]:
A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,)
A = decimal_to_bits(A_ ) * self.bit_scale
A = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A = self.unet(A_ ,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample
A = bits_to_decimal(A_ )
if output_type == "pil":
A = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ ) | 22 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _snake_case ( snake_case__ : Optional[int] ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
download_parser.set_defaults(func=A_ )
def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]:
A = model
A = cache
A = force
A = trust_remote_code
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) | 22 | 1 |
"""simple docstring"""
from __future__ import annotations
_lowercase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ):
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the reference grid
A = 1
A = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) )
] # the action grid
A = init[0]
A = init[1]
A = 0
A = g + heuristic[x][y] # cost from starting cell to destination cell
A = [[f, g, x, y]]
A = False # flag that is set when search is complete
A = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case__ ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A = cell.pop()
A = next_cell[2]
A = next_cell[3]
A = next_cell[1]
if x == goal[0] and y == goal[1]:
A = True
else:
for i in range(len(snake_case__ ) ): # to try out different valid actions
A = x + DIRECTIONS[i][0]
A = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A = g + cost
A = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A = 1
A = i
A = []
A = goal[0]
A = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A = x - DIRECTIONS[action[x][y]][0]
A = y - DIRECTIONS[action[x][y]][1]
A = xa
A = ya
invpath.append([x, y] )
A = []
for i in range(len(snake_case__ ) ):
path.append(invpath[len(snake_case__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
_lowercase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_lowercase = [0, 0]
# all coordinates are given in format [y,x]
_lowercase = [len(grid) - 1, len(grid[0]) - 1]
_lowercase = 1
# the cost map which pushes the path closer to the goal
_lowercase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_lowercase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_lowercase = 99
_lowercase , _lowercase = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i]) | 22 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spm_char.model'''}
_lowercase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
_lowercase = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]:
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
A = [1]
if token_ids_a is None:
return ([0] * len(A_ )) + suffix_ones
return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,) | 22 | 1 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A = flax_key_tuple[:-1] + ('weight',)
A = torch.permute(snake_case__ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ):
# linear layer
A = flax_key_tuple[:-1] + ('weight',)
A = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def _snake_case ( snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Dict ):
if "metadata" in layer:
A = layer.split('metadata' )
A = ''.join(split_layer[0] )[:-1]
A = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
A = layer.split('kvstore' )
A = ''.join(split_layer[0] )[:-1]
A = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
A = layer.split('/' )
A = '/'.join(split_layer[:-1] )
A = (split_layer[-1],)
if "kvstore/path" in layer:
A = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
A = 'file'
else:
A = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[str] ):
A = rename_keys(snake_case__ )
A = {}
for k, v in current_block.items():
A = v
A = new_current_block
torch.save(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str = WEIGHTS_NAME ):
A = convert_file_size_to_int(snake_case__ )
A = []
A = {}
A = 0
A = 0
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
A = serialization.msgpack_restore(fp.read() )['optimizer']['target']
A = flatten_dict(snake_case__ , sep='/' )
A = {}
for layer in checkpoint_info.keys():
A , A , A = get_key_and_tensorstore_dict(
snake_case__ , snake_case__ , snake_case__ )
if curr_real_layer_name in all_layers:
A = content
else:
A = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A = torch.tensor(snake_case__ )
A = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A , A = rename_base_flax_keys(tuple(key.split('/' ) ) , snake_case__ )
A = '/'.join(snake_case__ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A = os.path.join(
snake_case__ , weights_name.replace('.bin' , F'-{len(snake_case__ )+1:05d}-of-???.bin' ) )
rename_and_save_block(snake_case__ , snake_case__ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A = {}
A = 0
A = raw_weights.to(getattr(snake_case__ , snake_case__ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A = os.path.join(snake_case__ , weights_name.replace('.bin' , F'-{len(snake_case__ )+1:05d}-of-???.bin' ) )
rename_and_save_block(snake_case__ , snake_case__ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(snake_case__ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A = {}
A = {}
for idx, shard in enumerate(snake_case__ ):
A = weights_name.replace(
'.bin' , F'-{idx+1:05d}-of-{len(snake_case__ ):05d}.bin' ) # len(sharded_state_dicts):05d}
A = os.path.join(snake_case__ , weights_name.replace('.bin' , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
A = shard
for key in shard:
A = shard_file
# Add the metadata
A = {'total_size': total_size}
A = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(snake_case__ , snake_case__ ) , 'w' , encoding='utf-8' ) as f:
A = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__ ) + '\n'
f.write(snake_case__ )
return metadata, index
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_lowercase = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def _snake_case ( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
A = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
A = TaTokenizer.from_pretrained('t5-small' )
A = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
A = tokenizer(snake_case__ , return_tensors='pt' ).input_ids
A = model.generate(snake_case__ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 22 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPFeatureExtractor''']
_lowercase = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
"""simple docstring"""
import numpy as np
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Dict ):
A = int(np.ceil((x_end - xa) / h ) )
A = np.zeros((n + 1,) )
A = ya
A = xa
for k in range(snake_case__ ):
A = f(snake_case__ , y[k] )
A = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
A = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
A = f(x + h , y[k] + h * ka )
A = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
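# The layout below follows the standard transformers test pattern: a small tester class
# builds a tiny, randomly initialized config plus dummy tensors, and the unittest class
# underneath drives it through every XLM head via the create_and_check_* helpers.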
class XLMModelTester:
    '''Builds a tiny XLM config and dummy inputs, and checks each model head against them.'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
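    # Builds the shared dummy batch: random token ids, a random attention mask, and the
    # optional lengths, language ids, and per-head labels gated by the use_* flags above.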
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return XLMConfig(vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id)
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
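    # XLMForQuestionAnswering (next) appears to wrap the XLNet-style SQuAD head: it returns
    # top-k start/end log-probabilities plus an answerability (cls) logit, which is why the
    # checks below look at start_n_top/end_n_top rather than plain span logits.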
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def _SCREAMING_SNAKE_CASE ( self : List[str] ,pipeline_test_casse_name : Optional[int] ,config_class : Optional[int] ,model_architecture : Tuple ,tokenizer_name : Any ,processor_name : str ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 22 | 1 |
"""simple docstring"""
def _snake_case ( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
_lowercase = {'''a''', '''b''', '''c''', '''d''', '''e'''}
_lowercase = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
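    # intersection {'c', 'd', 'e'} (3 elements) over a union of 8 elements -> prints 0.375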
print(jaccard_similarity(set_a, set_b)) | 22 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
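# Map images with pixel values in [0, 1] to per-channel bit planes with values in {-1, +1}.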
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ):
A = x.device
A = (x * 255).int().clamp(0 , 255 )
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' )
A = ((x & mask) != 0).float()
A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' )
A = bits * 2 - 1
return bits
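# Inverse operation: collapse {-1, +1} bit planes back to decimal pixel values in [0, 1].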
def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ):
A = x.device
A = (x > 0).int()
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 )
A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
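# Round-trip sketch (shapes here are illustrative, not from the original file):
#   x = torch.rand(1, 3, 64, 64)
#   bits = decimal_to_bits(x)      # shape (1, 3 * BITS, 64, 64), values in {-1.0, +1.0}
#   x_rt = bits_to_decimal(bits)   # equals (x * 255).int() / 255, i.e. x quantized to 8 bits
# The next two functions are custom scheduler steps (note the extra `self` argument): the
# pipeline at the end of this sample binds them to a DDIM or DDPM scheduler so the predicted
# x_0 is clipped to the bit scale instead of the usual [-1, 1] range.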
def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-depth understanding
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
A = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
A = self.alphas_cumprod[timestep]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
A = self._get_variance(snake_case__ , snake_case__ )
A = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu'
A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ )
A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise
A = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
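# The step above is the standard DDIM update (Song et al. 2020, Eq. 12) with a modified clip range:
#   x_{t-1} = sqrt(alpha_{t-1}) * x_0_pred + sqrt(1 - alpha_{t-1} - sigma_t^2) * eps + sigma_t * z
# where sigma_t = eta * sqrt((1 - alpha_{t-1}) / (1 - alpha_t)) * sqrt(1 - alpha_t / alpha_{t-1})
# and the Gaussian noise z is only added when eta > 0.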
def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 )
else:
A = None
# 1. compute alphas, betas
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[t - 1] if t > 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
A = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A = 0
if t > 0:
A = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device )
A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
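# The step above is the standard DDPM ancestral update (Ho et al. 2020, Eq. 7): the posterior mean
#   mu_t = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0_pred
#        + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t
# plus scheduler variance noise for t > 0, again clipping x_0 to the bit scale.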
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]:
super().__init__()
A = bit_scale
A = (
ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]:
A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,)
A = decimal_to_bits(A_ ) * self.bit_scale
A = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A = self.unet(A_ ,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample
A = bits_to_decimal(A_ )
if output_type == "pil":
A = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ ) | 22 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = PegasusTokenizer
_lowerCamelCase: Optional[int] = PegasusTokenizerFast
_lowerCamelCase: str = True
_lowerCamelCase: List[Any] = True
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
super().setUp()
# We have a SentencePiece fixture for testing
A = PegasusTokenizer(A_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def _SCREAMING_SNAKE_CASE ( self : int ,**A_ : List[Any] ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ) -> str:
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = '</s>'
A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<pad>' )
self.assertEqual(vocab_keys[1] ,'</s>' )
self.assertEqual(vocab_keys[-1] ,'v' )
self.assertEqual(len(A_ ) ,1103 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size ,1103 )
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A = self.tokenizer_class.from_pretrained(self.tmpdirname )
A = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
A = rust_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0]
A = py_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0]
self.assertListEqual(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
A = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
A = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
A = tokenizer([raw_input_str] ,return_tensors=A_ ).input_ids[0]
self.assertListEqual(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
A = 'To ensure a smooth flow of bank resolutions.'
A = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
A = tokenizer([raw_input_str] ,return_tensors=A_ ).input_ids[0]
self.assertListEqual(A_ ,A_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ['This is going to be way too long.' * 150, 'short example']
A = ['not super long but more than 5 tokens', 'tiny']
A = self._large_tokenizer(A_ ,padding=A_ ,truncation=A_ ,return_tensors='pt' )
A = self._large_tokenizer(
text_target=A_ ,max_length=5 ,padding=A_ ,truncation=A_ ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A_ ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
# fmt: off
A = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,)
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = PegasusTokenizer
_lowerCamelCase: Dict = PegasusTokenizerFast
_lowerCamelCase: Any = True
_lowerCamelCase: Dict = True
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
A = PegasusTokenizer(A_ ,offset=0 ,mask_token_sent=A_ ,mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def _SCREAMING_SNAKE_CASE ( self : str ,**A_ : Any ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Dict ) -> Dict:
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A = self.tokenizer_class.from_pretrained(self.tmpdirname )
A = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
A = rust_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0]
A = py_tokenizer([raw_input_str] ,return_tensors=A_ ,add_special_tokens=A_ ).input_ids[0]
self.assertListEqual(A_ ,A_ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = ['This is going to be way too long.' * 1000, 'short example']
A = ['not super long but more than 5 tokens', 'tiny']
A = self._large_tokenizer(A_ ,padding=A_ ,truncation=A_ ,return_tensors='pt' )
A = self._large_tokenizer(
text_target=A_ ,max_length=5 ,padding=A_ ,truncation=A_ ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A_ ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
A = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
A = self._large_tokenizer(A_ ).input_ids
self.assertListEqual(
A_ ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,) | 22 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''yolos'''
def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return 12 | 22 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : pyspark.sql.DataFrame ,A_ : Optional[NamedSplit] = None ,A_ : Optional[Features] = None ,A_ : bool = True ,A_ : str = None ,A_ : bool = False ,A_ : str = None ,A_ : bool = True ,A_ : str = "arrow" ,**A_ : int ,) -> int:
super().__init__(
split=A_ ,features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,**A_ ,)
A = load_from_cache_file
A = file_format
A = Spark(
df=A_ ,features=A_ ,cache_dir=A_ ,working_dir=A_ ,**A_ ,)
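    # Either stream the DataFrame directly, or materialize it into the cache (honoring the
    # load_from_cache_file flag via DownloadMode) and return the requested split.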
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
A = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=A_ ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split ) | 22 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
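    # Each test below swaps in a different scheduler and compares a 3x3 corner slice of the
    # generated image against hard-coded references (loose 1e-1 tolerance for the tiny model).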
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 22 | 1 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Optional[Any] ):
A = sorted(zip(snake_case__ , snake_case__ ) , key=lambda snake_case__ : x[0] / x[1] , reverse=snake_case__ )
A , A = [i[0] for i in r], [i[1] for i in r]
A = list(accumulate(snake_case__ ) )
A = bisect(snake_case__ , snake_case__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
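# Standard ImageNet-style preprocessing: resize the shortest edge, center crop, rescale by
# 1/255, then normalize; a semantic-segmentation post-processing helper closes the class.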
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
_lowercase = 4
_lowercase = 3
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
pass
def _snake_case ( snake_case__ : List[str] ):
for shard in shards:
for i in range(snake_case__ ):
yield {"i": i, "shard": shard}
def _snake_case ( ):
A = int(os.environ['RANK'] )
A = int(os.environ['WORLD_SIZE'] )
A = ArgumentParser()
parser.add_argument('--streaming' , type=snake_case__ )
parser.add_argument('--local_rank' , type=snake_case__ )
parser.add_argument('--num_workers' , type=snake_case__ , default=0 )
A = parser.parse_args()
A = args.streaming
A = args.num_workers
A = {'shards': [F'shard_{shard_idx}' for shard_idx in range(snake_case__ )]}
A = IterableDataset.from_generator(snake_case__ , gen_kwargs=snake_case__ )
if not streaming:
A = Dataset.from_list(list(snake_case__ ) )
A = split_dataset_by_node(snake_case__ , rank=snake_case__ , world_size=snake_case__ )
A = torch.utils.data.DataLoader(snake_case__ , num_workers=snake_case__ )
A = NUM_SHARDS * NUM_ITEMS_PER_SHARD
A = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
A = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main() | 22 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
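# Convert a pickled pre-processed corpus and/or a TensorFlow checkpoint to PyTorch artifacts.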
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 1 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[str] ,A_ : Tuple ,A_ : str=13 ,A_ : List[Any]=7 ,A_ : Dict=True ,A_ : Optional[int]=True ,A_ : Optional[int]=True ,A_ : Any=True ,A_ : Any=99 ,A_ : Optional[int]=24 ,A_ : Tuple=2 ,A_ : int=6 ,A_ : Dict=37 ,A_ : List[str]="gelu" ,A_ : str=0.1 ,A_ : List[Any]=0.1 ,A_ : Any=512 ,A_ : Union[str, Any]=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : Any=3 ,A_ : Tuple=None ,A_ : Optional[int]=1000 ,) -> Union[str, Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = scope
A = range_bbox
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A = bbox[i, j, 3]
A = bbox[i, j, 1]
A = t
if bbox[i, j, 2] < bbox[i, j, 0]:
A = bbox[i, j, 2]
A = bbox[i, j, 0]
A = t
A = None
if self.use_input_mask:
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
return LiltConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ,A_ : Optional[int] ,A_ : str ,A_ : Union[str, Any] ,A_ : str ,A_ : Dict ,A_ : Tuple ,) -> List[str]:
A = LiltModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,bbox=A_ ,attention_mask=A_ ,token_type_ids=A_ )
A = model(A_ ,bbox=A_ ,token_type_ids=A_ )
A = model(A_ ,bbox=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : Dict ,A_ : List[str] ,A_ : Dict ,A_ : int ,) -> Optional[Any]:
A = self.num_labels
A = LiltForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
A = model(
A_ ,bbox=A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Dict ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : Dict ,A_ : Dict ,) -> List[Any]:
A = LiltForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
A = model(
A_ ,bbox=A_ ,attention_mask=A_ ,token_type_ids=A_ ,start_positions=A_ ,end_positions=A_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
A = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCamelCase: str = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase: Optional[Any] = False
_lowerCamelCase: List[str] = False
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : Dict ,A_ : Union[str, Any] ) -> int:
return True
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = LiltModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A = type
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = LiltModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
A = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(A_ )
A = torch.tensor([[1, 2]] ,device=A_ )
A = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=A_ )
# forward pass
with torch.no_grad():
A = model(input_ids=A_ ,bbox=A_ )
A = torch.Size([1, 2, 768] )
A = torch.tensor(
[[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] ,device=A_ ,)
self.assertTrue(outputs.last_hidden_state.shape ,A_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,A_ ,atol=1e-3 ) ) | 22 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
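# Directed graph stored as an adjacency map: vertex -> list of [weight, neighbor] pairs.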
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
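    # Iterative DFS from s (default: first vertex); returns the visit order, stopping early
    # if the target vertex d is reached.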
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
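    # Fill the graph with random edges for testing; c (randomized when -1) bounds the vertex labels.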
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
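    # Iterative BFS from s (default: first vertex) using a deque; returns the visit order.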
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
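    # Degree helpers: in_degree scans every adjacency list for edges into u; out_degree is the
    # length of u's own adjacency list.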
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
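    # DFS post-order traversal; for an acyclic graph the returned list is a topological order.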
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
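    # DFS with back-edge detection; returns the nodes that lie on at least one cycle.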
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
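    # Same traversal as above, but returns True as soon as any back edge (a cycle) is found.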
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
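# Undirected variant of the graph above: add_pair/remove_pair mirror every edge in
# both adjacency lists, so a single degree method replaces in_degree/out_degree.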
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
        # check if u exists
if self.graph.get(A_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
            for _ in self.graph[u][:]:  # iterate over a copy: removing from a list while iterating it skips elements
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
            for _ in self.graph[v][:]:  # iterate over a copy here as well
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
            # every vertex gets between 1 and 102 random outgoing edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 | 1 |
"""simple docstring"""
def _snake_case ( snake_case__ : list ):
if len(snake_case__ ) <= 1:
return lst
A = 1
while i < len(snake_case__ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
A , A = lst[i], lst[i - 1]
i -= 1
if i == 0:
A = 1
return lst
if __name__ == "__main__":
_lowercase = input('''Enter numbers separated by a comma:\n''').strip()
_lowercase = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted)) | 22 |
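# Example trace: gnome_sort([3, 1, 2]) swaps and steps back on each inversion,
# [3, 1, 2] -> [1, 3, 2] -> [1, 2, 3], giving O(n^2) comparisons in the worst case.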
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
A = F'{olid} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
        'number_of_pages': 'Number of pages',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = '''gptsan-japanese'''
_lowerCamelCase: Dict = [
'''past_key_values''',
]
_lowerCamelCase: Dict = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Dict ,A_ : Any=3_6000 ,A_ : Dict=1280 ,A_ : Any=1024 ,A_ : Optional[int]=8192 ,A_ : int=4096 ,A_ : Any=128 ,A_ : Union[str, Any]=10 ,A_ : str=0 ,A_ : Union[str, Any]=16 ,A_ : str=16 ,A_ : str=128 ,A_ : Any=0.0 ,A_ : Tuple=1e-5 ,A_ : int=False ,A_ : Optional[int]=0.0 ,A_ : int="float32" ,A_ : List[Any]=False ,A_ : Union[str, Any]=False ,A_ : Optional[Any]=False ,A_ : str=0.0_02 ,A_ : Optional[int]=False ,A_ : List[str]=True ,A_ : Optional[int]=3_5998 ,A_ : Dict=3_5995 ,A_ : str=3_5999 ,**A_ : List[Any] ,) -> Optional[int]:
A = vocab_size
A = max_position_embeddings
A = d_model
A = d_ff
A = d_ext
A = d_spout
A = num_switch_layers
A = num_ext_layers
A = num_switch_layers + num_ext_layers
A = num_heads
A = num_experts
A = expert_capacity
A = dropout_rate
A = layer_norm_epsilon
A = router_bias
A = router_jitter_noise
A = router_dtype
A = router_ignore_padding_tokens
A = output_hidden_states
A = output_attentions
A = initializer_factor
A = output_router_logits
A = use_cache
super().__init__(
separator_token_id=A_ ,pad_token_id=A_ ,eos_token_id=A_ ,**A_ ,) | 22 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = '''Wav2Vec2FeatureExtractor'''
_lowerCamelCase: Union[str, Any] = '''AutoTokenizer'''
def __init__( self : Union[str, Any] ,A_ : Optional[Any] ,A_ : Any ) -> Optional[Any]:
super().__init__(A_ ,A_ )
A = self.feature_extractor
A = False
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,A_ : Tuple ,**A_ : Union[str, Any] ) -> List[Any]:
try:
return super().from_pretrained(A_ ,**A_ )
except OSError:
warnings.warn(
F'Loading a tokenizer inside {cls.__name__} from a config that does not'
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' ,A_ ,)
A = WavaVecaFeatureExtractor.from_pretrained(A_ ,**A_ )
A = WavaVecaCTCTokenizer.from_pretrained(A_ ,**A_ )
return cls(feature_extractor=A_ ,tokenizer=A_ )
def __call__( self : Optional[int] ,*A_ : str ,**A_ : List[str] ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A_ ,**A_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
A = kwargs.pop('raw_speech' )
else:
A = kwargs.pop('audio' ,A_ )
A = kwargs.pop('sampling_rate' ,A_ )
A = kwargs.pop('text' ,A_ )
if len(A_ ) > 0:
A = args[0]
A = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
A = self.feature_extractor(A_ ,*A_ ,sampling_rate=A_ ,**A_ )
if text is not None:
A = self.tokenizer(A_ ,**A_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A = encodings['input_ids']
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ,*A_ : Tuple ,**A_ : Optional[Any] ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A_ ,**A_ )
A = kwargs.pop('input_features' ,A_ )
A = kwargs.pop('labels' ,A_ )
if len(A_ ) > 0:
A = args[0]
A = args[1:]
if input_features is not None:
A = self.feature_extractor.pad(A_ ,*A_ ,**A_ )
if labels is not None:
A = self.tokenizer.pad(A_ ,**A_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A = labels['input_ids']
return input_features
def _SCREAMING_SNAKE_CASE ( self : str ,*A_ : str ,**A_ : Optional[Any] ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,*A_ : int ,**A_ : Optional[int] ) -> List[Any]:
return self.tokenizer.decode(*A_ ,**A_ )
@contextmanager
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
A = True
A = self.tokenizer
yield
A = self.feature_extractor
A = False | 22 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
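# The timm checkpoint stores the attention projections fused in a single `qkv`
# tensor; the converter below slices it into separate query/key/value weights and
# biases sized by each block's `all_head_size`.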
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
                A = val[dim : dim * 2]
A = val[-dim:]
else:
A = val
return orig_state_dict
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
A = SwinvaForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 22 | 1 |
"""simple docstring"""
from collections.abc import Sequence
def _snake_case ( snake_case__ : Sequence[int] | None = None ):
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
A = nums[0]
for i in range(1 , len(snake_case__ ) ):
A = nums[i]
A = max(snake_case__ , ans + num , snake_case__ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_lowercase = int(input('''Enter number of elements : ''').strip())
_lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array)) | 22 |
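# The intended Kadane recurrence is ans = max(num, ans + num, ans): either extend
# the best subarray ending at the previous index or restart at num. For example,
# on [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray sum is 6 (slice [4, -1, 2, 1]).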
"""simple docstring"""
from math import pi, sqrt
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
    elif num - int(num ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
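# Reference values used by the test below: gamma(n) == (n - 1)! for positive
# integers, and gamma(0.5) == sqrt(pi) ~= 1.7724538509.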
def _snake_case ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: int
_lowerCamelCase: Node | None = None
_lowerCamelCase: Node | None = None
def _snake_case ( ):
    A = Node(1 )
    A.left = Node(2 )
    A.right = Node(3 )
    A.left.left = Node(4 )
    A.left.right = Node(5 )
    return A
def _snake_case ( snake_case__ : Node | None ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _snake_case ( snake_case__ : Node | None ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _snake_case ( snake_case__ : Node | None ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _snake_case ( snake_case__ : Node | None ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _snake_case ( snake_case__ : Node | None ):
A = []
if root is None:
return output
A = deque([root] )
while process_queue:
A = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def _snake_case ( snake_case__ : Node | None , snake_case__ : int ):
A = []
def populate_output(snake_case__ : Node | None , snake_case__ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(snake_case__ , snake_case__ )
return output
def _snake_case ( snake_case__ : Node | None , snake_case__ : int ):
A = []
def populate_output(snake_case__ : Node | None , snake_case__ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(snake_case__ , snake_case__ )
return output
def _snake_case ( snake_case__ : Node | None ):
if root is None:
return []
A = []
A = 0
A = height(snake_case__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(snake_case__ , snake_case__ ) )
A = 1
else:
output.append(get_nodes_from_right_to_left(snake_case__ , snake_case__ ) )
A = 0
return output
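# With the 5-node sample tree built above (1 at the root, 2/3 as its children,
# 4/5 under 2), the demo prints pre-order [1, 2, 4, 5, 3], in-order
# [4, 2, 5, 1, 3], post-order [4, 5, 2, 3, 1] and level order [1, 2, 3, 4, 5].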
def _snake_case ( ): # Main function for testing.
A = make_tree()
print(F'In-order Traversal: {inorder(snake_case__ )}' )
print(F'Pre-order Traversal: {preorder(snake_case__ )}' )
print(F'Post-order Traversal: {postorder(snake_case__ )}' , '\n' )
print(F'Height of Tree: {height(snake_case__ )}' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(snake_case__ ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(snake_case__ ) + 1 ):
print(F'Level {level}:' , get_nodes_from_left_to_right(snake_case__ , level=snake_case__ ) )
print('\nZigZag order Traversal: ' )
print(zigzag(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 22 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
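    # Reparameterization trick: sample() draws eps ~ N(0, I) with randn_tensor and
    # returns mean + std * eps, so gradients can flow through mean and std.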
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 1 |
"""simple docstring"""
_lowercase = 9.80_665
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float = g ):
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 22 |
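# Example: a fully submerged 0.5 m^3 body in fresh water (1000 kg/m^3) experiences
# a buoyant force of 1000 * 9.80665 * 0.5 ~= 4903.3 N.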
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 1 |
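# This is Neville's iterated interpolation: q[n - 1][n - 1] holds the polynomial's
# value at xa. For collinear data the result is exact, e.g. interpolating
# x_points = [1, 2, 3, 4], y_points = [6, 7, 8, 9] at xa = 5 gives 10.0.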
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''pixel_values''']
def __init__( self : Union[str, Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : bool = True ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Dict ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,default_to_square=A_ ,param_name='crop_size' )
A = do_resize
A = do_rescale
A = do_normalize
A = do_center_crop
A = crop_size
A = size
A = resample
A = rescale_factor
A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Tuple ,) -> np.ndarray:
A = get_size_dict(A_ )
if "shortest_edge" in size:
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
A = (size['height'], size['width'])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Dict ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : str ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : int = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : List[Any] ,) -> BatchFeature:
A = do_resize if do_resize is not None else self.do_resize
A = do_rescale if do_rescale is not None else self.do_rescale
A = do_normalize if do_normalize is not None else self.do_normalize
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = size if size is not None else self.size
A = get_size_dict(A_ )
if not is_batched(A_ ):
A = [images]
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ ) | 22 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 1 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def _snake_case ( snake_case__ : bytes ):
if len(snake_case__ ) != 32:
raise ValueError('Input must be of length 32' )
A = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _snake_case ( snake_case__ : int ):
if i < 0:
raise ValueError('Input must be non-negative' )
A = format(snake_case__ , '08x' )[-8:]
A = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
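# Message pre-processing per RFC 1321: append a single 1-bit, zero-pad until the
# bit length is 448 (mod 512), then append the original length as two
# little-endian 32-bit words.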
def _snake_case ( snake_case__ : bytes ):
A = B''
for char in message:
bit_string += format(snake_case__ , '08b' ).encode('utf-8' )
A = format(len(snake_case__ ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(snake_case__ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _snake_case ( snake_case__ : bytes ):
if len(snake_case__ ) % 512 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(snake_case__ ) , 512 ):
A = bit_string[pos : pos + 512]
A = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def _snake_case ( snake_case__ : int ):
if i < 0:
raise ValueError('Input must be non-negative' )
A = format(snake_case__ , '032b' )
A = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(snake_case__ , 2 )
def _snake_case ( snake_case__ : int , snake_case__ : int ):
return (a + b) % 2**32
def _snake_case ( snake_case__ : int , snake_case__ : int ):
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _snake_case ( snake_case__ : bytes ):
A = preprocess(snake_case__ )
A = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
A = 0x67_452_301
A = 0xef_cda_b89
A = 0x98_bad_cfe
A = 0x10_325_476
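    # Per-round left-rotation amounts, 16 per round, as specified in RFC 1321.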
A = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(snake_case__ ):
A = aa
A = ba
A = ca
A = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
A = d ^ (b & (c ^ d))
A = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
A = c ^ (d & (b ^ c))
A = (5 * i + 1) % 16
elif i <= 47:
A = b ^ c ^ d
A = (3 * i + 5) % 16
else:
A = c ^ (b | not_aa(snake_case__ ))
A = (7 * i) % 16
A = (f + a + added_consts[i] + block_words[g]) % 2**32
A = d
A = c
A = b
A = sum_aa(snake_case__ , left_rotate_aa(snake_case__ , shift_amounts[i] ) )
# Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
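# A quick sanity check for `md5_me` above; the empty-message digest below is the
# standard MD5 test vector:
#
#     >>> md5_me(b'')
#     b'd41d8cd98f00b204e9800998ecf8427e'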
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
        (A, A, A, A, A, A) = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
        (A, A, A, A, A, A) = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because the output embeddings are a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
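# A minimal masked-LM sketch mirroring the integration test above (same public
# checkpoint; the expected logits shape comes from the assertions):
#
#     from transformers import TFEsmForMaskedLM
#     model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 33)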
"""simple docstring"""
import math
def solution ( n : int = 100 ):
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
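# Worked check for n = 3: (1 + 2 + 3)**2 - (1 + 4 + 9) = 36 - 14 = 22.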
if __name__ == "__main__":
print(F"""{solution() = }""") | 22 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands, it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class MraConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''mra'''
def __init__( self : List[Any] ,A_ : Tuple=5_0265 ,A_ : int=768 ,A_ : Dict=12 ,A_ : Union[str, Any]=12 ,A_ : Tuple=3072 ,A_ : Tuple="gelu" ,A_ : Optional[Any]=0.1 ,A_ : Optional[Any]=0.1 ,A_ : Dict=512 ,A_ : Any=1 ,A_ : str=0.02 ,A_ : List[Any]=1e-5 ,A_ : Optional[Any]="absolute" ,A_ : Union[str, Any]=4 ,A_ : Union[str, Any]="full" ,A_ : Union[str, Any]=0 ,A_ : str=0 ,A_ : Optional[Any]=1 ,A_ : str=0 ,A_ : Any=2 ,**A_ : Tuple ,) -> List[str]:
super().__init__(pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,**A_ )
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = type_vocab_size
A = layer_norm_eps
A = position_embedding_type
A = block_per_row
A = approx_mode
A = initial_prior_first_n_blocks
        A = initial_prior_diagonal_n_blocks
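# A minimal usage sketch (via the public transformers API; keyword names are elided
# in the signature above, so they are shown here as the library documents them):
#
#     config = MraConfig(hidden_size=768, num_hidden_layers=12)
#     config.model_type  # 'mra'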
"""simple docstring"""
import sys
from collections import defaultdict
class Heap :
    '''simple docstring'''

    def __init__( self ) -> None:
        self.node_position = []
    def get_position( self ,vertex ):
        return self.node_position[vertex]
    def set_position( self ,vertex ,pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self ,heap ,start ,size ,positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] ,self.get_position(positions[start] ) )
                self.set_position(positions[start] ,temp )
                self.top_to_bottom(heap ,smallest_child ,size ,positions )
    def bottom_to_top( self ,val ,index ,heap ,position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] ,index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp ,index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp ,0 )
    def heapify( self ,heap ,positions ):
        start = len(heap ) // 2 - 1
        for i in range(start ,-1 ,-1 ):
            self.top_to_bottom(heap ,i ,len(heap ) ,positions )
    def delete_minimum( self ,heap ,positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap ,0 ,len(heap ) ,positions )
        return temp
def prisms_algorithm ( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum distance of each unexplored vertex from the partial tree
    # built so far in the graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
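# A minimal sketch of the expected input format: each vertex maps to a list of
# [neighbor, weight] pairs, and the result is a list of (parent, child) tree edges.
#
#     example = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2)]:
#         example[u].append([v, w])
#         example[v].append([u, w])
#     prisms_algorithm(example)  # -> [(0, 1), (1, 2)]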
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''conditional_detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
def __init__( self : Optional[int] ,A_ : List[Any]=True ,A_ : Optional[Any]=None ,A_ : List[str]=3 ,A_ : str=300 ,A_ : List[Any]=6 ,A_ : List[str]=2048 ,A_ : int=8 ,A_ : str=6 ,A_ : str=2048 ,A_ : Union[str, Any]=8 ,A_ : str=0.0 ,A_ : List[str]=0.0 ,A_ : Tuple=True ,A_ : int="relu" ,A_ : int=256 ,A_ : Any=0.1 ,A_ : List[str]=0.0 ,A_ : Optional[int]=0.0 ,A_ : int=0.02 ,A_ : str=1.0 ,A_ : List[Any]=False ,A_ : List[Any]="sine" ,A_ : Optional[Any]="resnet50" ,A_ : Any=True ,A_ : Union[str, Any]=False ,A_ : List[str]=2 ,A_ : Tuple=5 ,A_ : Tuple=2 ,A_ : int=1 ,A_ : Optional[int]=1 ,A_ : Optional[Any]=2 ,A_ : Union[str, Any]=5 ,A_ : Any=2 ,A_ : Any=0.25 ,**A_ : Any ,) -> Optional[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
A = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(A_ ,A_ ):
A = backbone_config.get('model_type' )
A = CONFIG_MAPPING[backbone_model_type]
A = config_class.from_dict(A_ )
A = use_timm_backbone
A = backbone_config
A = num_channels
A = num_queries
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = init_xavier_std
A = encoder_layerdrop
A = decoder_layerdrop
A = encoder_layers
A = auxiliary_loss
A = position_embedding_type
A = backbone
A = use_pretrained_backbone
A = dilation
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = mask_loss_coefficient
A = dice_loss_coefficient
A = cls_loss_coefficient
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = focal_alpha
super().__init__(is_encoder_decoder=A_ ,**A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return self.d_model
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A = self.backbone_config.to_dict()
A = self.__class__.model_type
return output
class ConditionalDetrOnnxConfig ( OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> float:
return 1e-5
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
        return 12
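# A minimal usage sketch (via the public transformers API):
#
#     config = ConditionalDetrConfig(num_queries=100)
#     config.model_type  # 'conditional_detr'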
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase = True
except ImportError:
_lowercase = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory ( args : Namespace ):
    return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(A_ )
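# Example invocation (the command is deprecated in favor of `add-new-model-like`, as
# the warning above states; the config path is illustrative):
#
#     transformers-cli add-new-model
#     transformers-cli add-new-model --testing --testing_file ./tests/fixtures/add_new_model.json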
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image :
        '''simple docstring'''

        @staticmethod
        def open( *args : List[str] ,**kwargs : Union[str, Any] ) -> Tuple:
pass
def hashimage ( image : Image ):
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Dict ) -> Optional[Any]:
A = DepthEstimationPipeline(model=A_ ,image_processor=A_ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : List[Any] ) -> Union[str, Any]:
A = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' )
self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} ,A_ )
import datasets
A = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' ,'image' ,split='test' )
A = depth_estimator(
[
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
] )
self.assertEqual(
[
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
{'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )},
] ,A_ ,)
@require_tf
@unittest.skip('Depth estimation is not implemented in TF' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
pass
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
A = 'Intel/dpt-large'
A = pipeline('depth-estimation' ,model=A_ )
A = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' )
A = hashimage(outputs['depth'] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) ,29.3_04 )
self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) ,2.6_62 )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
# This is highly irregular to have no small tests.
        self.skipTest('There is no hf-internal-testing tiny model for either GLPN nor DPT' )
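# A minimal inference sketch mirroring the slow test above (same public checkpoint):
#
#     from transformers import pipeline
#     depth_estimator = pipeline('depth-estimation', model='Intel/dpt-large')
#     out = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg')
#     out['depth']            # PIL.Image.Image rendering of the depth map
#     out['predicted_depth']  # torch.Tensor of raw predictions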
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str:
A = size if size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ImageGPTImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'clusters' ) )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
A = self.image_processing_class(**self.image_processor_dict )
A = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,obj[key] ) )
else:
self.assertEqual(obj[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,'image_processor.json' )
image_processor_first.to_json_file(A_ )
A = self.image_processing_class.from_json_file(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(A_ )
A = self.image_processing_class.from_pretrained(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
def prepare_images ( ):
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
    imagea = Image.open(dataset[4]['file'] )
    imageb = Image.open(dataset[5]['file'] )
    images = [imagea, imageb]
    return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
A = prepare_images()
# test non-batched
A = image_processing(images[0] ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
A = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ )
# test batched
A = image_processing(A_ ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
A = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
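# A minimal preprocessing sketch mirroring the integration test above: ImageGPT maps
# pixels to color-cluster indices, so each encoded image is 32 * 32 = 1024 ids.
#
#     image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
#     encoding = image_processing(prepare_images() ,return_tensors='pt' )
#     encoding.input_ids.shape  # torch.Size([2, 1024])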
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint ( checkpoint : Optional[int] , config : List[Any] ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
A = vae_state_dict['encoder.conv_in.weight']
A = vae_state_dict['encoder.conv_in.bias']
A = vae_state_dict['encoder.conv_out.weight']
A = vae_state_dict['encoder.conv_out.bias']
A = vae_state_dict['encoder.norm_out.weight']
A = vae_state_dict['encoder.norm_out.bias']
A = vae_state_dict['decoder.conv_in.weight']
A = vae_state_dict['decoder.conv_in.bias']
A = vae_state_dict['decoder.conv_out.weight']
A = vae_state_dict['decoder.conv_out.bias']
A = vae_state_dict['decoder.norm_out.weight']
A = vae_state_dict['decoder.norm_out.bias']
A = vae_state_dict['quant_conv.weight']
A = vae_state_dict['quant_conv.bias']
A = vae_state_dict['post_quant_conv.weight']
A = vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
A = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
A = {
layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(snake_case__ )
}
# Retrieves the keys for the decoder up blocks only
A = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
A = {
layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(snake_case__ )
}
for i in range(snake_case__ ):
A = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
A = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.weight' )
A = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.bias' )
A = renew_vae_resnet_paths(snake_case__ )
A = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
A = [key for key in vae_state_dict if 'encoder.mid.block' in key]
A = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
A = renew_vae_resnet_paths(snake_case__ )
A = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
A = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
A = renew_vae_attention_paths(snake_case__ )
A = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
conv_attn_to_linear(snake_case__ )
for i in range(snake_case__ ):
A = num_up_blocks - 1 - i
A = [
key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
]
if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
A = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.weight'
]
A = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.bias'
]
A = renew_vae_resnet_paths(snake_case__ )
A = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
A = [key for key in vae_state_dict if 'decoder.mid.block' in key]
A = 2
for i in range(1 , num_mid_res_blocks + 1 ):
A = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
A = renew_vae_resnet_paths(snake_case__ )
A = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
A = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
A = renew_vae_attention_paths(snake_case__ )
A = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
conv_attn_to_linear(snake_case__ )
return new_checkpoint
def vae_pt_to_vae_diffuser ( checkpoint_path : str , output_path : str , ):
    # Only supports v1 checkpoints
    r = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors' ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework='pt' , device='cpu' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )['state_dict']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
_lowercase = parser.parse_args()
    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
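# Example invocation (file and path names are illustrative):
#
#     python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers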
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory ( args ):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand ( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
download_parser.set_defaults(func=A_ )
def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]:
A = model
A = cache
A = force
A = trust_remote_code
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
            self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
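# Example invocation (the model name is illustrative; flags match the parser above):
#
#     transformers-cli download bert-base-uncased --cache-dir ./models --force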
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''flax''']
def __init__( self : Optional[Any] ,*A_ : int ,**A_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Any ,**A_ : List[Any] ) -> int:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : Optional[Any] ,**A_ : Union[str, Any] ) -> str:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = ['''flax''']
def __init__( self : Any ,*A_ : List[str] ,**A_ : Tuple ) -> Any:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Tuple ,**A_ : str ) -> Optional[int]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,*A_ : List[Any] ,**A_ : List[str] ) -> int:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''flax''']
def __init__( self : int ,*A_ : List[Any] ,**A_ : int ) -> str:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : str ,**A_ : List[str] ) -> Union[str, Any]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[str] ,**A_ : Any ) -> Optional[Any]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''flax''']
def __init__( self : Union[str, Any] ,*A_ : Tuple ,**A_ : List[Any] ) -> Tuple:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : str ,**A_ : Tuple ) -> Union[str, Any]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Dict ,**A_ : Union[str, Any] ) -> List[Any]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''flax''']
def __init__( self : int ,*A_ : Union[str, Any] ,**A_ : List[Any] ) -> List[Any]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : int ,**A_ : List[str] ) -> List[str]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[Any] ,**A_ : Optional[Any] ) -> Tuple:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''flax''']
def __init__( self : int ,*A_ : Tuple ,**A_ : str ) -> Union[str, Any]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> Tuple:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : int ) -> Optional[Any]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''flax''']
def __init__( self : Any ,*A_ : List[str] ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Optional[int] ,**A_ : Any ) -> Dict:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,*A_ : Dict ,**A_ : Optional[Any] ) -> Dict:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''flax''']
def __init__( self : Optional[int] ,*A_ : Optional[int] ,**A_ : Dict ) -> List[str]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : str ) -> Optional[int]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : List[str] ,**A_ : Dict ) -> Union[str, Any]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''flax''']
def __init__( self : List[str] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Tuple:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ,*A_ : List[str] ,**A_ : Tuple ) -> Optional[int]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,*A_ : Union[str, Any] ,**A_ : Any ) -> Optional[int]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = ['''flax''']
def __init__( self : Optional[Any] ,*A_ : List[Any] ,**A_ : Optional[int] ) -> Any:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : str ,**A_ : Union[str, Any] ) -> Dict:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ,*A_ : Tuple ,**A_ : Any ) -> Dict:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = ['''flax''']
def __init__( self : Optional[Any] ,*A_ : List[Any] ,**A_ : int ) -> List[Any]:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : int ,**A_ : Union[str, Any] ) -> Tuple:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : int ,**A_ : Optional[Any] ) -> Optional[int]:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''flax''']
def __init__( self : List[Any] ,*A_ : List[str] ,**A_ : List[str] ) -> str:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : List[str] ,**A_ : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,*A_ : Any ,**A_ : List[str] ) -> Dict:
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=_lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = ['''flax''']
def __init__( self : List[str] ,*A_ : List[Any] ,**A_ : Dict ) -> Dict:
requires_backends(self ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ,*A_ : Optional[Any] ,**A_ : List[str] ) -> Any:
requires_backends(cls ,['flax'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int ,*A_ : Tuple ,**A_ : Any ) -> Dict:
        requires_backends(cls ,['flax'] )
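# A note on the stub classes above: in the shipped file each uses `DummyObject`
# (imported at the top) as its metaclass, so that merely touching the class without
# the `flax` backend installed raises a helpful ImportError via `requires_backends`
# instead of failing at import time. The concrete Flax class names have been elided
# here, which is why they all appear as identical placeholders.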
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spm_char.model'''}
_lowercase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
_lowercase = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class SpeechT5Tokenizer ( PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self : List[str] ,vocab_file : int ,bos_token : List[str]="<s>" ,eos_token : Optional[Any]="</s>" ,unk_token : Optional[Any]="<unk>" ,pad_token : str="<pad>" ,sp_model_kwargs : Optional[Dict[str, Any]] = None ,**kwargs : List[str] ,) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]:
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
A = [1]
if token_ids_a is None:
return ([0] * len(A_ )) + suffix_ones
return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
        return (out_vocab_file,)
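# A minimal usage sketch (checkpoint name taken from the pretrained map above):
#
#     tokenizer = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_tts')
#     ids = tokenizer('Hello world').input_ids  # character-level pieces, ending in </s>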
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImgaImgPipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
A = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
torch.manual_seed(0 )
A = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,)
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
A = CLIPTextModel(A_ )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Any ,A_ : Union[str, Any]=0 ) -> int:
if str(A_ ).startswith('mps' ):
A = torch.manual_seed(A_ )
else:
A = torch.Generator(device=A_ ).manual_seed(A_ )
A = 2
A = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=A_ ,device=torch.device(A_ ) ,)
A = floats_tensor(control_image.shape ,rng=random.Random(A_ ) ).to(A_ )
A = image.cpu().permute(0 ,2 ,3 ,1 )[0]
A = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((64, 64) )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests ( PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
torch.manual_seed(0 )
        def init_weights(m : Tuple ):
            if isinstance(m ,torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
A = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(A_ )
torch.manual_seed(0 )
A = ControlNetModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,)
controlneta.controlnet_down_blocks.apply(A_ )
torch.manual_seed(0 )
A = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,)
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
A = CLIPTextModel(A_ )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = MultiControlNetModel([controlneta, controlneta] )
A = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : Optional[int]=0 ) -> str:
if str(A_ ).startswith('mps' ):
A = torch.manual_seed(A_ )
else:
A = torch.Generator(device=A_ ).manual_seed(A_ )
A = 2
A = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=A_ ,device=torch.device(A_ ) ,),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=A_ ,device=torch.device(A_ ) ,),
]
A = floats_tensor(control_image[0].shape ,rng=random.Random(A_ ) ).to(A_ )
A = image.cpu().permute(0 ,2 ,3 ,1 )[0]
A = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((64, 64) )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = self.get_dummy_components()
A = self.pipeline_class(**A_ )
pipe.to(A_ )
A = 10.0
A = 4
A = self.get_dummy_inputs(A_ )
A = steps
A = scale
A = pipe(**A_ )[0]
A = self.get_dummy_inputs(A_ )
A = steps
A = scale
A = pipe(**A_ ,control_guidance_start=0.1 ,control_guidance_end=0.2 )[0]
A = self.get_dummy_inputs(A_ )
A = steps
A = scale
A = pipe(**A_ ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7] )[0]
A = self.get_dummy_inputs(A_ )
A = steps
A = scale
A = pipe(**A_ ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.get_dummy_components()
A = self.pipeline_class(**A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(A_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
A = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,safety_checker=A_ ,controlnet=A_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A_ )
A = torch.Generator(device='cpu' ).manual_seed(0 )
A = 'evil space-punk bird'
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
A = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
A = pipe(
A_ ,A_ ,control_image=A_ ,generator=A_ ,output_type='np' ,num_inference_steps=50 ,strength=0.6 ,)
A = output.images[0]
assert image.shape == (512, 512, 3)
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
assert np.abs(expected_image - image ).max() < 9e-2 | 22 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
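# Each optional backend below is probed with a cheap availability check; when a
# dependency is missing, its classes are simply left out of the lazy import
# structure instead of raising at import time.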
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPFeatureExtractor''']
_lowercase = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
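# Under TYPE_CHECKING the same names are imported eagerly so static type
# checkers can resolve them; at runtime the module is swapped for a _LazyModule
# that defers the heavy imports until an attribute is first accessed.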
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_lowercase = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_lowercase = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. One analysis of this alignment appeals to a power law that relates perplexity to word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_lowercase = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/jitsi/jiwer/'] ,reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int]=None ,A_ : int=None ,A_ : str=False ) -> List[str]:
if concatenate_texts:
return compute_measures(A_ ,A_ )["wer"]
else:
A = 0
A = 0
for prediction, reference in zip(A_ ,A_ ):
A = compute_measures(A_ ,A_ )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 22 |
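# --- Illustrative sketch (added for exposition; not part of the metric above) ---
# The docstring formula WER = (S + D + I) / (S + D + C) can be evaluated with a
# word-level Levenshtein alignment. This standalone helper mirrors what
# `jiwer.compute_measures` does for a single reference/prediction pair.
def _word_error_rate_sketch(reference: str, prediction: str) -> float:
    ref, hyp = reference.split(), prediction.split()
    # dp[i][j] = minimum edits turning the first i reference words
    # into the first j predicted words
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1  # substitution cost
            dp[i][j] = min(
                dp[i - 1][j] + 1,  # deletion
                dp[i][j - 1] + 1,  # insertion
                dp[i - 1][j - 1] + cost,  # match / substitution
            )
    return dp[len(ref)][len(hyp)] / len(ref)
# One substitution over four reference words gives a WER of 0.25:
assert _word_error_rate_sketch("this is the reference", "this is the prediction") == 0.25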
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with the slower tokenizers
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 22 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def _snake_case ( snake_case__ : Callable[[int | float], int | float] , snake_case__ : int | float , snake_case__ : int | float , snake_case__ : int = 100 , ):
A = x_start
A = fnc(snake_case__ )
A = 0.0
for _ in range(snake_case__ ):
# Approximate small segments of the curve as linear and solve
# for the trapezoidal area
A = (x_end - x_start) / steps + xa
A = fnc(snake_case__ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
A = xa
A = fxa
return area
if __name__ == "__main__":
def _snake_case ( snake_case__ : Tuple ):
return x**3 + x**2
print('''f(x) = x^3 + x^2''')
print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
_lowercase = 10
while i <= 10_00_00:
print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10 | 22 |
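# --- Illustrative cross-check (added for exposition; self-contained) ---
# Because each trapezoid contributes abs(f(x1) + f(x2)) * (x2 - x1) / 2, the
# script above estimates the unsigned area between the curve and the x axis.
# For f(x) = x^3 + x^2 on [-5, 5], f is negative on [-5, -1] and non-negative
# on [-1, 5], so the exact unsigned area is 344/3 + 198 = 938/3 ≈ 312.67.
# The helper below restates the rule independently of the names in this file.
def _unsigned_area_check(steps: int = 100_000) -> float:
    f = lambda x: x**3 + x**2
    width = 10 / steps  # uniform step over [-5, 5]
    xs = [-5 + k * width for k in range(steps + 1)]
    return sum(abs(f(xs[k]) + f(xs[k + 1])) / 2 * width for k in range(steps))
assert abs(_unsigned_area_check() - 938 / 3) < 1e-2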
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
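# number of bit planes per 8-bit channel value: bit diffusion denoises these
# {-1, +1}-valued planes instead of continuous pixel intensities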
_lowercase = 8
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ):
A = x.device
A = (x * 255).int().clamp(0 , 255 )
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' )
A = ((x & mask) != 0).float()
A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' )
A = bits * 2 - 1
return bits
def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ):
A = x.device
A = (x > 0).int()
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 )
A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
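# --- Illustrative round-trip sketch (added for exposition; self-contained) ---
# Encoding quantizes each channel to 8 bits and maps every bit plane to
# {-1, +1}; decoding reassembles the integer and rescales to [0, 1]. The pair
# below restates the idea with plain torch ops (no einops) so the round trip
# can be checked independently of the helpers above.
def _encode_bits_sketch(x: torch.Tensor) -> torch.Tensor:
    ints = (x * 255).int().clamp(0, 255)  # (b, c, h, w) integers in [0, 255]
    weights = 2 ** torch.arange(7, -1, -1)  # 128, 64, ..., 1
    planes = (ints.unsqueeze(2) & weights.view(1, 1, 8, 1, 1)) != 0
    b, c, d, h, w = planes.shape
    return planes.reshape(b, c * d, h, w).float() * 2 - 1  # bits in {-1, +1}
def _decode_bits_sketch(bits: torch.Tensor) -> torch.Tensor:
    b, cd, h, w = bits.shape
    planes = (bits > 0).int().reshape(b, cd // 8, 8, h, w)
    weights = 2 ** torch.arange(7, -1, -1, dtype=torch.int32).view(1, 1, 8, 1, 1)
    return ((planes * weights).sum(dim=2) / 255).clamp(0.0, 1.0)
# The round trip agrees up to the 1/255 quantization performed while encoding:
_x = torch.rand(1, 3, 4, 4)
assert torch.allclose(
    _decode_bits_sketch(_encode_bits_sketch(_x)), (_x * 255).int().float() / 255, atol=1e-6
)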
def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read the DDIM paper in detail to understand what follows
# Notation: (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
A = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
A = self.alphas_cumprod[timestep]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
A = self._get_variance(snake_case__ , snake_case__ )
A = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu'
A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ )
A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise
A = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 )
else:
A = None
# 1. compute alphas, betas
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[t - 1] if t > 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
A = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A = 0
if t > 0:
A = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device )
A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]:
super().__init__()
A = bit_scale
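# choose the bit-aware step function that matches the scheduler type; in the
# upstream bit-diffusion pipeline this selection is installed as `scheduler.step`
# so that sampling clamps "predicted x_0" to ±bit_scale instead of ±1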
A = (
ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]:
A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,)
A = decimal_to_bits(A_ ) * self.bit_scale
A = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A = self.unet(A_ ,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample
A = bits_to_decimal(A_ )
if output_type == "pil":
A = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ ) | 22 | 1 |
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))''')) | 22 |
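# A classic Python quine: formatting the string with its own repr via %r makes
# the program print an exact copy of its source line.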
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''yolos'''
def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
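# DETR-style bipartite matching: class_cost, bbox_cost and giou_cost weight the
# classification, L1 box and generalized-IoU terms when matching predicted
# detection tokens to ground-truth boxes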
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return 12 | 22 | 1 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 22 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
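# the flag set on the session options here presumably corresponds to
# `enable_mem_pattern = False` (as in the upstream diffusers test), disabling
# ORT's memory-pattern optimization for reproducibility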
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 22 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : str ,A_ : Optional[Any] ,) -> Union[str, Any]:
A = parent
A = 13
A = 7
A = True
A = True
A = False
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ,A_ : Tuple ,A_ : Optional[Any] ,A_ : str ,A_ : Union[str, Any] ,A_ : str ) -> List[str]:
A = TFDistilBertModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : List[Any] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : str ) -> Any:
A = TFDistilBertForMaskedLM(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Any ,A_ : Any ,A_ : Dict ,A_ : Optional[Any] ,A_ : Union[str, Any] ,A_ : str ) -> Tuple:
A = TFDistilBertForQuestionAnswering(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
A = model(A_ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ) -> Tuple:
A = self.num_labels
A = TFDistilBertForSequenceClassification(A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Tuple ,A_ : List[str] ,A_ : str ,A_ : Tuple ,A_ : Tuple ,A_ : Tuple ) -> int:
A = self.num_choices
A = TFDistilBertForMultipleChoice(A_ )
A = tf.tile(tf.expand_dims(A_ ,1 ) ,(1, self.num_choices, 1) )
A = tf.tile(tf.expand_dims(A_ ,1 ) ,(1, self.num_choices, 1) )
A = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[int] ,A_ : int ,A_ : int ,A_ : List[Any] ) -> str:
A = self.num_labels
A = TFDistilBertForTokenClassification(A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = self.prepare_config_and_inputs()
((A) , (A) , (A) , (A) , (A) , (A)) = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_lowerCamelCase: str = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Optional[int] = False
_lowerCamelCase: str = False
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = TFDistilBertModelTester(self )
A = ConfigTester(self ,config_class=A_ ,dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
A = TFDistilBertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
A = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 768]
self.assertEqual(output.shape ,A_ )
A = tf.constant(
[
[
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] ,A_ ,atol=1e-4 ) | 22 |
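# A short, hedged usage sketch complementing the integration test above: tokenize a
# sentence and embed it with the same checkpoint. AutoTokenizer is an assumption here
# (the test above drives the model with hand-written input ids instead).
if __name__ == "__main__":
    from transformers import AutoTokenizer, TFDistilBertModel
    tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased' )
    model = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
    inputs = tokenizer('Hello, world!' ,return_tensors='tf' )
    outputs = model(inputs )
    print(outputs.last_hidden_state.shape )  # (1, sequence_length, 768)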
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 | 1 |
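# A minimal, hedged sketch of the preprocessing pipeline implemented by the class above,
# expressed with the functional transforms it imports (resize -> center_crop -> rescale
# -> normalize -> channel-first). The random image and printed shape are illustrative.
if __name__ == "__main__":
    image = np.random.randint(0 ,256 ,size=(480, 640, 3) ,dtype=np.uint8 )
    output_size = get_resize_output_image_size(image ,size=256 ,default_to_square=False )
    image = resize(image ,size=output_size )
    image = center_crop(image ,size=(224, 224) )
    image = rescale(image ,scale=1 / 255 )
    image = normalize(image ,mean=IMAGENET_STANDARD_MEAN ,std=IMAGENET_STANDARD_STD )
    image = to_channel_dimension_format(image ,ChannelDimension.FIRST )
    print(image.shape )  # (3, 224, 224)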
"""simple docstring"""
import requests
def send_slack_message( message_body : str , slack_url : str ):
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            'Request to slack returned an error '
            F'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''') | 22 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , 'rb' ) as fp:
            corpus = pickle.load(fp , encoding='latin1' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F'Save dataset to {pytorch_dataset_dump_path}' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'Building PyTorch model from configuration: {config}' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'Save configuration file to {os.path.abspath(pytorch_config_dump_path )}' )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 1 |
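# Example invocation (hypothetical script name and paths; with --transfo_xl_dataset_file
# set, the script additionally dumps the corpus vocabulary and cache next to the weights):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/transfo-xl/model.ckpt \
#       --transfo_xl_config_file /tmp/transfo-xl/config.json \
#       --pytorch_dump_folder_path /tmp/transfo-xl-pytorch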
"""simple docstring"""
_lowercase = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich | 22 |
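# A minimal, hedged sketch of the core workflow built from the re-exports above: wrap
# model/optimizer/dataloader with Accelerator.prepare and route the backward pass
# through the accelerator. The tiny linear model and random data are placeholders.
if __name__ == "__main__":
    import torch
    from accelerate import Accelerator
    accelerator = Accelerator()
    model = torch.nn.Linear(4 ,2 )
    optimizer = torch.optim.SGD(model.parameters() ,lr=0.1 )
    dataset = torch.utils.data.TensorDataset(torch.randn(8 ,4 ) ,torch.randn(8 ,2 ) )
    dataloader = torch.utils.data.DataLoader(dataset ,batch_size=4 )
    model , optimizer , dataloader = accelerator.prepare(model ,optimizer ,dataloader )
    for inputs , targets in dataloader:
        loss = torch.nn.functional.mse_loss(model(inputs ) ,targets )
        accelerator.backward(loss )
        optimizer.step()
        optimizer.zero_grad()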
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
# check if the u exists
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 | 1 |
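# Hypothetical usage sketch for the graph classes above. It assumes the duplicated
# `A_` parameter names in the method signatures are restored to their upstream names
# (u, v, w) and that the methods keep the names referenced internally (add_pair, dfs,
# bfs); as written, the mangled signatures will not compile, so this stays commented.
#
#   graph = Graph()          # the undirected class defined last above
#   graph.add_pair(0, 1)
#   graph.add_pair(1, 2)
#   graph.add_pair(2, 3)
#   print(graph.dfs(0, 3))   # a path such as [0, 1, 2, 3]
#   print(graph.bfs(0))      # breadth-first visit order starting at node 0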
"""simple docstring"""
class Things:
    '''simple docstring'''
    def __init__( self : Optional[Any] ,name : Dict ,value : Any ,weight : Union[str, Any] ) -> List[Any]:
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self : Dict ) -> Union[str, Any]:
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
    def get_value( self : Optional[Any] ) -> Any:
        return self.value
    def get_name( self : str ) -> str:
        return self.name
    def get_weight( self : List[str] ) -> Optional[Any]:
        return self.weight
    def value_weight( self : str ) -> str:
        return self.value / self.weight
def build_menu( name : Any , value : Any , weight : Dict ):
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy( items : Tuple , max_cost : Optional[int] , key_func : Any ):
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def _snake_case ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
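# A tiny end-to-end demo of the greedy selection above: sort by raw value and take
# items while the weight budget (max_cost=60) allows. Expected pick: Pizza then Burger.
if __name__ == "__main__":
    names = ['Burger', 'Pizza', 'Coca Cola', 'Rice']
    values = [80, 100, 60, 70]
    weights = [40, 10, 20, 70]
    foods = build_menu(names ,values ,weights )
    chosen , total_value = greedy(foods ,60 ,Things.get_value )
    print(chosen ,total_value )  # [Things(Pizza, 100, 10), Things(Burger, 80, 40)] 180.0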
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data( olid : str = "isbn/0140328726" ):
    new_olid = olid.strip().strip('/' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/' ) != 1:
        msg = F'{olid} is not a valid Open Library olid'
        raise ValueError(msg )
    return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def summarize_book( ol_book_data : dict ):
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ', '.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 1 |
"""simple docstring"""
def _snake_case ( number : int ):
    if not isinstance(number , int ):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
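# Quick sanity checks: the predicate is True exactly when number**2 ends with the
# digits of number (an "automorphic" number):
#   >>> _snake_case(5)    # 5**2 == 25
#   True
#   >>> _snake_case(76)   # 76**2 == 5776
#   True
#   >>> _snake_case(7)    # 7**2 == 49
#   False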
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve( num : int ):
    if num <= 0:
        msg = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip()))) | 22 |
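# Example:
#   >>> prime_sieve(30)
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]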
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config( swinva_name : str ):
    config = SwinvaConfig()
    name_split = swinva_name.split('_' )
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:] )
    else:
        img_size = int(name_split[3] )
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:] )
    else:
        window_size = int(name_split[2][6:] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-22k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key( name : str ):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        name = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "q_bias" in name:
        name = name.replace('q_bias' , 'query.bias' )
    if "k_bias" in name:
        name = name.replace('k_bias' , 'key.bias' )
    if "v_bias" in name:
        name = name.replace('v_bias' , 'value.bias' )
    if "cpb_mlp" in name:
        name = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'swinv2.' + name
    return name
def convert_state_dict( orig_state_dict : dict , model : Any ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[F'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[F'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[F'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[F'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swinva_checkpoint( swinva_name : str , pytorch_dump_folder_path : str ):
    timm_model = timm.create_model(swinva_name , pretrained=True )
    timm_model.eval()
    config = get_swinva_config(swinva_name )
    model = SwinvaForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )
    print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path , swinva_name ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
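# Example invocation (hypothetical script name and dump path; the script downloads the
# timm weights, checks the converted logits against timm, saves locally, then pushes):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path /tmp/swinv2-tiny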
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Tuple ,A_ : Dict ,A_ : int = 13 ,A_ : int = 64 ,A_ : int = 2 ,A_ : int = 3 ,A_ : int = 3 ,A_ : bool = True ,A_ : bool = True ,A_ : int = 128 ,A_ : Dict=[16, 32, 64, 128] ,A_ : int = 7 ,A_ : int = 4 ,A_ : int = 37 ,A_ : str = "gelu" ,A_ : float = 0.1 ,A_ : float = 0.1 ,A_ : int = 10 ,A_ : float = 0.02 ,A_ : int = 2 ,A_ : int = 1 ,A_ : int = 128 ,A_ : List[int] = [2, 2, 2, 2] ,A_ : int = 2 ,A_ : int = 2 ,) -> str:
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = is_training
A = use_labels
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = type_sequence_label_size
A = initializer_range
A = encoder_stride
A = num_attention_outputs
A = embed_dim
A = embed_dim + 1
A = resolution
A = depths
A = hidden_sizes
A = dim
A = mlp_expansion_ratio
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return EfficientFormerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,resolution=self.resolution ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,dim=self.dim ,mlp_expansion_ratio=self.mlp_expansion_ratio ,)
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ,A_ : List[Any] ,A_ : Union[str, Any] ) -> List[Any]:
A = TFEfficientFormerModel(config=A_ )
A = model(A_ ,training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : int ,A_ : Union[str, Any] ) -> Union[str, Any]:
A = self.type_sequence_label_size
A = TFEfficientFormerForImageClassification(A_ )
A = model(A_ ,labels=A_ ,training=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A = 1
A = TFEfficientFormerForImageClassification(A_ )
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: Union[str, Any] = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCamelCase: int = False
_lowerCamelCase: Tuple = False
_lowerCamelCase: Dict = False
_lowerCamelCase: Dict = False
_lowerCamelCase: Any = False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
A = TFEfficientFormerModelTester(self )
A = ConfigTester(
self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
def check_hidden_states_output(A_ : Tuple ,A_ : Tuple ,A_ : str ):
A = model_class(A_ )
A = model(**self._prepare_for_class(A_ ,A_ ) ,training=A_ )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = getattr(
self.model_tester ,'expected_num_hidden_layers' ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A_ ) ,A_ )
if hasattr(self.model_tester ,'encoder_seq_length' ):
A = self.model_tester.encoder_seq_length
if hasattr(self.model_tester ,'chunk_length' ) and self.model_tester.chunk_length > 1:
A = seq_length * self.model_tester.chunk_length
else:
A = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
if config.is_encoder_decoder:
A = outputs.decoder_hidden_states
                self.assertIsInstance(A_ ,(list, tuple) )
self.assertEqual(len(A_ ) ,A_ )
A = getattr(self.model_tester ,'seq_length' ,A_ )
A = getattr(self.model_tester ,'decoder_seq_length' ,A_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[decoder_seq_length, self.model_tester.hidden_size] ,)
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(A_ ,A_ ,A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Tuple ,A_ : List[str]=False ) -> List[Any]:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEfficientFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
A = getattr(self.model_tester ,'seq_length' ,A_ )
A = getattr(self.model_tester ,'encoder_seq_length' ,A_ )
A = getattr(self.model_tester ,'key_length' ,A_ )
A = getattr(self.model_tester ,'chunk_length' ,A_ )
if chunk_length is not None and hasattr(self.model_tester ,'num_hashes' ):
A = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A = True
A = False
A = True
A = model_class(A_ )
A = model(**self._prepare_for_class(A_ ,A_ ) ,training=A_ )
A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) ,self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = model_class(A_ )
A = model(**self._prepare_for_class(A_ ,A_ ) ,training=A_ )
A = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) ,self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] ,)
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] ,)
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A = model_class(A_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A = {
key: tf.keras.Input(shape=val.shape[1:] ,dtype=val.dtype ,name=A_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A = model(A_ )
self.assertTrue(outputs_dict is not None )
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
A = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=A_ ,return_tensors='tf' )
# forward pass
A = model(**A_ ,training=A_ )
# verify the logits
A = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,A_ )
        A = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=A_ ,return_tensors='tf' )
# forward pass
A = model(**A_ ,training=A_ )
# verify the logits
A = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,A_ )
        A = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) ) | 22 |
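# A standalone, hedged sketch of the integration path exercised above; the checkpoint
# and processor ids are taken from the tests, and the all-black input image is a
# placeholder for a real photo.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image as PILImage
    from transformers import EfficientFormerImageProcessor, TFEfficientFormerForImageClassification
    image = PILImage.fromarray(np.zeros((256, 256, 3) ,dtype=np.uint8 ) )
    processor = EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
    model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
    inputs = processor(images=image ,return_tensors='tf' )
    logits = model(**inputs ,training=False ).logits
    print(logits.shape )  # (1, 1000)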
"""simple docstring"""
from math import pi, sqrt
def gamma( num : float ):
    if num <= 0:
        raise ValueError('math domain error' )
    if num > 171.5:
        raise OverflowError('math range error' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer' )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma( ):
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 1 |
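# Spot checks against known values: Γ(5) = 4! = 24 and Γ(3.5) = 15·√π/8 ≈ 3.3234.
#   >>> gamma(5)
#   24.0
#   >>> round(gamma(3.5), 4)
#   3.3234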
"""simple docstring"""
def solution( n : int = 200_0000 ):
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""") | 22 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 1 |
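# ----------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the module above): the two
# gradient tricks the classes above rely on -- the straight-through
# estimator in the vector quantizer and the reparameterization trick in the
# diagonal Gaussian. Shapes and values are toy examples; `torch.round`
# merely stands in for the nearest-codebook lookup.
import torch

z = torch.randn(4, 8, requires_grad=True)            # pretend encoder output
z_q = z + (torch.round(z) - z).detach()              # forward: quantized value; backward: identity
z_q.sum().backward()
assert torch.allclose(z.grad, torch.ones_like(z))    # gradient flowed through unchanged

mean, logvar = torch.zeros(4, 8), torch.zeros(4, 8)
sample = mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)  # draw from N(mean, exp(logvar))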
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = '''dpt'''
def __init__( self : Union[str, Any] ,A_ : Union[str, Any]=768 ,A_ : Tuple=12 ,A_ : Tuple=12 ,A_ : str=3072 ,A_ : Optional[int]="gelu" ,A_ : List[Any]=0.0 ,A_ : Dict=0.0 ,A_ : Dict=0.02 ,A_ : Any=1e-12 ,A_ : Optional[int]=384 ,A_ : Optional[Any]=16 ,A_ : Dict=3 ,A_ : Optional[Any]=False ,A_ : Dict=True ,A_ : Any=[2, 5, 8, 11] ,A_ : Optional[Any]="project" ,A_ : Tuple=[4, 2, 1, 0.5] ,A_ : List[Any]=[96, 192, 384, 768] ,A_ : Optional[Any]=256 ,A_ : Any=-1 ,A_ : Optional[int]=False ,A_ : Optional[Any]=True ,A_ : Dict=0.4 ,A_ : Dict=255 ,A_ : List[str]=0.1 ,A_ : Optional[Any]=[1, 1024, 24, 24] ,A_ : str=[0, 1] ,A_ : List[str]=None ,**A_ : Dict ,) -> Tuple:
super().__init__(**A_ )
A = hidden_size
A = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('Initializing the config with a `BiT` backbone.' )
A = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
}
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
logger.info('Initializing the config with a `BiT` backbone.' )
A = BitConfig(**A_ )
elif isinstance(A_ ,A_ ):
A = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
A = backbone_featmap_shape
A = neck_ignore_stages
if readout_type != "project":
raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
else:
A = None
A = None
A = []
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
A = readout_type
A = reassemble_factors
A = neck_hidden_sizes
A = fusion_hidden_size
A = head_in_index
A = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
A = use_auxiliary_head
A = auxiliary_loss_weight
A = semantic_loss_ignore_index
A = semantic_classifier_dropout
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
A = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
A = self.backbone_config.to_dict()
A = self.__class__.model_type
return output | 22 |
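# Illustrative sketch (an addition): the nested-config serialization
# pattern behind `to_dict` above, reduced to plain Python; `SubConfig`
# and `Config` are hypothetical stand-ins for the backbone and DPT
# configurations.
import copy

class SubConfig:
    def __init__(self, depth=3):
        self.depth = depth

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class Config:
    def __init__(self):
        self.hidden_size = 768
        self.backbone_config = SubConfig()

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        # replace the nested config object with its own dictionary form
        output["backbone_config"] = self.backbone_config.to_dict()
        return output

print(Config().to_dict())  # {'hidden_size': 768, 'backbone_config': {'depth': 3}}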
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
    # Neville's iterated interpolation: row j of column i of the tableau q
    # holds the value at xa of the polynomial through the i points
    # x_points[j - i + 1 .. j]; q[n - 1][n - 1] is the full interpolant.
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 1 |
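# Illustrative sketch (an addition): a readable restatement of the Neville
# tableau built above, with a usage check. `neville` is a hypothetical
# helper; the result is exact whenever the data come from a polynomial of
# degree below the number of sample points.
def neville(xs: list, ys: list, x0: float) -> float:
    p = list(ys)
    n = len(xs)
    for i in range(1, n):
        for j in range(n - i):
            p[j] = ((x0 - xs[j + i]) * p[j] + (xs[j] - x0) * p[j + 1]) / (xs[j] - xs[j + i])
    return p[0]

xs = [1.0, 2.0, 3.0, 4.0]
ys = [x * x for x in xs]                        # samples of a quadratic
assert abs(neville(xs, ys, 2.5) - 6.25) < 1e-9  # exact for degree < len(xs)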
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def _snake_case ( snake_case__ : Dict[str, torch.Tensor] ):
A = []
A = []
A = []
for rt in rc.restypes:
A = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
A = {name: i for i, name in enumerate(snake_case__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
A = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein['aatype'].device , )
A = torch.tensor(
snake_case__ , dtype=torch.intaa , device=protein['aatype'].device , )
A = torch.tensor(
snake_case__ , dtype=torch.floataa , device=protein['aatype'].device , )
A = protein['aatype'].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
A = restype_atomaa_to_atomaa[protein_aatype]
A = restype_atomaa_mask[protein_aatype]
A = residx_atomaa_mask
A = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
A = restype_atomaa_to_atomaa[protein_aatype]
A = residx_atomaa_to_atomaa.long()
# create the corresponding mask
A = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
A = rc.restype_atoa[restype_letter]
A = rc.residue_atoms[restype_name]
for atom_name in atom_names:
A = rc.atom_order[atom_name]
A = 1
A = restype_atomaa_mask[protein_aatype]
A = residx_atomaa_mask
return protein
def _snake_case ( snake_case__ : Dict[str, torch.Tensor] ):
A = tree_map(lambda snake_case__ : torch.tensor(snake_case__ , device=batch['aatype'].device ) , snake_case__ , np.ndarray )
A = tensor_tree_map(lambda snake_case__ : np.array(snake_case__ ) , make_atomaa_masks(snake_case__ ) )
return out | 22 |
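# Illustrative sketch (an addition): the lookup-table-then-index pattern
# used above, on toy shapes. A per-type table is built once; advanced
# indexing with the per-residue type vector then broadcasts it to
# per-residue data. Values here are arbitrary.
import torch

table = torch.tensor([[10, 11], [20, 21], [30, 31]])  # (num_types, num_slots)
types = torch.tensor([2, 0, 1, 2])                    # one type id per residue
per_residue = table[types]                            # (num_residues, num_slots)
print(per_residue)  # tensor([[30, 31], [10, 11], [20, 21], [30, 31]])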
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 1 |
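# Illustrative sketch (an addition): the `retain_grad` pattern the
# gradient-retention test above depends on -- non-leaf tensors drop their
# gradients during backward unless retain_grad() is called first. Toy
# tensors only.
import torch

x = torch.randn(3, requires_grad=True)
h = x * 2          # non-leaf intermediate
h.retain_grad()    # without this, h.grad would be None after backward
h.sum().backward()
print(h.grad)      # tensor([1., 1., 1.])
print(x.grad)      # tensor([2., 2., 2.])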
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = '''roformer'''
def __init__( self : Optional[Any] ,A_ : str=5_0000 ,A_ : str=None ,A_ : Optional[Any]=768 ,A_ : Optional[int]=12 ,A_ : List[str]=12 ,A_ : int=3072 ,A_ : str="gelu" ,A_ : List[str]=0.1 ,A_ : Dict=0.1 ,A_ : Union[str, Any]=1536 ,A_ : Dict=2 ,A_ : str=0.02 ,A_ : Union[str, Any]=1e-12 ,A_ : List[str]=0 ,A_ : List[str]=False ,A_ : Optional[Any]=True ,**A_ : Tuple ,) -> str:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size if embedding_size is None else embedding_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = rotary_value
A = use_cache
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] ) | 22 |
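# Illustrative sketch (an addition, not RoFormer's exact code): the rotary
# position embedding idea behind the `rotary_value` flag above, in the
# common "rotate-half" formulation. The head dimension must be even;
# shapes and the base are toy choices.
import torch

def rotary(x: torch.Tensor, base: float = 10000.0) -> torch.Tensor:
    seq_len, dim = x.shape
    inv_freq = 1.0 / base ** (torch.arange(0, dim, 2).float() / dim)
    freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)
    emb = torch.cat((freqs, freqs), dim=-1)            # (seq_len, dim)
    x1, x2 = x.chunk(2, dim=-1)
    rotated = torch.cat((-x2, x1), dim=-1)             # "rotate half"
    return x * emb.cos() + rotated * emb.sin()

q = torch.randn(8, 16)    # (sequence length, head dimension)
print(rotary(q).shape)    # torch.Size([8, 16])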
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
        A , A , A , A , A , A = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
        A , A , A , A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 1 |
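# Illustrative sketch (an addition): how a random attention mask like the
# tester's can be built directly in TensorFlow -- zeros and ones with the
# first position always attendable. `random_attention_mask` here is a
# hypothetical helper with a toy shape.
import tensorflow as tf

def random_attention_mask(batch: int, seq_len: int) -> tf.Tensor:
    mask = tf.random.uniform((batch, seq_len), minval=0, maxval=2, dtype=tf.int32)
    first = tf.ones((batch, 1), dtype=tf.int32)   # guarantee one attendable token per row
    return tf.concat([first, mask[:, 1:]], axis=1)

print(random_attention_mask(2, 5).shape)  # (2, 5)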
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 |
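# Illustrative sketch (an addition): the deferred-import idea behind
# `_LazyModule`, reduced to PEP 562's module-level __getattr__. Placed in
# a package __init__, `pkg.HeavyClass` would import its submodule only on
# first access; `_LAZY` and the names inside it are hypothetical.
import importlib

_LAZY = {"HeavyClass": ".heavy_module"}  # attribute name -> submodule

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")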
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
_lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
_lowercase = '''|'''.join(sys.argv[1:])
_lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""")
_lowercase = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''') | 22 | 1 |
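# Illustrative sketch (an addition): what the regex above selects, run on a
# hypothetical file list instead of real `git diff` output, for the invocation
# `python ./utils/get_modified_files.py utils src tests examples`.
import re

regex = re.compile(r"^(utils|src|tests|examples).*?\.py$")
files = ["src/transformers/models/bert/modeling_bert.py", "README.md", "docs/notes.py"]
print([f for f in files if regex.match(f)])
# ['src/transformers/models/bert/modeling_bert.py']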
"""simple docstring"""
from math import pi, sqrt
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(snake_case__ ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
return sqrt(snake_case__ )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _snake_case ( ):
assert gamma(0.5 ) == sqrt(snake_case__ )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 |
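# Illustrative sketch (an addition): the half-integer recursion above,
# restated without the interactive loop and cross-checked against the
# standard library. Gamma(x) = (x - 1) * Gamma(x - 1) with base cases
# Gamma(1) = 1 and Gamma(1/2) = sqrt(pi).
import math

def gamma_half(x: float) -> float:
    if x == 1:
        return 1.0
    if x == 0.5:
        return math.sqrt(math.pi)
    return (x - 1) * gamma_half(x - 1)

assert math.isclose(gamma_half(3.5), math.gamma(3.5))
assert math.isclose(gamma_half(5), math.factorial(4))  # Gamma(n) = (n - 1)!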
"""simple docstring"""
import sys
from collections import defaultdict
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> int:
A = []
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]:
return self.node_position[vertex]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]:
A = pos
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A = 2 * start + 1
else:
A = 2 * start + 2
if heap[smallest_child] < heap[start]:
A , A = heap[smallest_child], positions[smallest_child]
A , A = (
heap[start],
positions[start],
)
A , A = temp, tempa
A = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,A_ )
self.top_to_bottom(A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict:
A = position[index]
while index != 0:
A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A = heap[parent]
A = position[parent]
self.set_position(position[parent] ,A_ )
else:
A = val
A = temp
self.set_position(A_ ,A_ )
break
A = parent
else:
A = val
A = temp
self.set_position(A_ ,0 )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]:
A = len(A_ ) // 2 - 1
for i in range(A_ ,-1 ,-1 ):
self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]:
A = positions[0]
A = sys.maxsize
self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ )
return temp
def _snake_case ( snake_case__ : Dict ):
A = Heap()
A = [0] * len(snake_case__ )
A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A = [] # Heap of Distance of vertices from their neighboring vertex
A = []
for vertex in range(len(snake_case__ ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case__ )
heap.node_position.append(snake_case__ )
A = []
A = 1
A = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A = 0
A = distance
heap.heapify(snake_case__ , snake_case__ )
for _ in range(1 , len(snake_case__ ) ):
A = heap.delete_minimum(snake_case__ , snake_case__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case__ )]
):
A = distance
heap.bottom_to_top(
snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ )
A = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 22 | 1 |
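# Illustrative sketch (an addition): the same minimum spanning tree
# computed with the standard-library heap instead of the hand-rolled one
# above. `adj` maps each vertex to a list of (neighbor, weight) pairs, as
# in the adjacency list built from user input.
import heapq

def prim_mst(adj: dict, start: int = 0) -> list:
    visited = {start}
    frontier = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(frontier)
    tree = []
    while frontier and len(visited) < len(adj):
        w, u, v = heapq.heappop(frontier)
        if v in visited:
            continue                      # stale entry; a cheaper edge got there first
        visited.add(v)
        tree.append((u, v))
        for nxt, weight in adj[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (weight, v, nxt))
    return tree

adj = {0: [(1, 1), (2, 4)], 1: [(0, 1), (2, 2)], 2: [(0, 4), (1, 2)]}
print(prim_mst(adj))  # [(0, 1), (1, 2)]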
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
_lowercase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def _snake_case ( snake_case__ : List[Any] ):
A = {}
with open(snake_case__ , 'r' ) as file:
for line_number, line in enumerate(snake_case__ ):
A = line.strip()
if line:
A = line.split()
A = line_number
A = words[0]
A = value
return result
def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
for attribute in key.split('.' ):
A = getattr(snake_case__ , snake_case__ )
A = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
A = PARAM_MAPPING[full_name.split('.' )[-1]]
A = 'param'
if weight_type is not None and weight_type != "param":
A = getattr(snake_case__ , snake_case__ ).shape
elif weight_type is not None and weight_type == "param":
A = hf_pointer
for attribute in hf_param_name.split('.' ):
A = getattr(snake_case__ , snake_case__ )
A = shape_pointer.shape
# let's reduce dimension
A = value[0]
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
A = getattr(snake_case__ , snake_case__ )
A = value
else:
A = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
A = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(snake_case__ ):
A = PARAM_MAPPING[full_name.split('.' )[-1]]
A = 'param'
if weight_type is not None and weight_type != "param":
A = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
A = '.'.join([key, hf_param_name] )
else:
A = key
A = value if 'lm_head' in full_key else value[0]
_lowercase = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def _snake_case ( snake_case__ : List[str] , snake_case__ : Any , snake_case__ : List[Any]=None , snake_case__ : int=None ):
A = False
for key, mapped_key in MAPPING.items():
A = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
A = True
if "*" in mapped_key:
A = name.split(snake_case__ )[0].split('.' )[-2]
A = mapped_key.replace('*' , snake_case__ )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
if hf_dict is not None:
rename_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
return is_used
return is_used
def _snake_case ( snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : int ):
A = []
A = fairseq_model.state_dict()
A = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , )
A = True
else:
A = load_wavaveca_layer(snake_case__ , snake_case__ , snake_case__ )
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F'Unused weights: {unused_weights}' )
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Any , snake_case__ : List[str] ):
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
A = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
A = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
A = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
A = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case__ )
@torch.no_grad()
def _snake_case ( snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=None , snake_case__ : Tuple=None , snake_case__ : Tuple=True , snake_case__ : Optional[Any]=False ):
if config_path is not None:
A = WavaVecaConfig.from_pretrained(snake_case__ )
else:
A = WavaVecaConfig()
if is_seq_class:
A = read_txt_into_dict(snake_case__ )
A = idalabel
A = WavaVecaForSequenceClassification(snake_case__ )
A = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
feature_extractor.save_pretrained(snake_case__ )
elif is_finetuned:
if dict_path:
A = Dictionary.load(snake_case__ )
            # important: change the bos & pad token ids, since the CTC symbol is
            # <pad> and not <s> as in fairseq
A = target_dict.pad_index
A = target_dict.bos_index
A = target_dict.eos_index
A = len(target_dict.symbols )
A = os.path.join(snake_case__ , 'vocab.json' )
if not os.path.isdir(snake_case__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
A = target_dict.indices
# fairseq has the <pad> and <s> switched
A = 0
A = 1
with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(snake_case__ , snake_case__ )
A = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=snake_case__ , )
A = True if config.feat_extract_norm == 'layer' else False
A = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
A = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
A = WavaVecaForCTC(snake_case__ )
else:
A = WavaVecaForPreTraining(snake_case__ )
if is_finetuned or is_seq_class:
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
A = argparse.Namespace(task='audio_pretraining' )
A = fairseq.tasks.setup_task(snake_case__ )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case__ )
A = model[0].eval()
recursively_load_weights(snake_case__ , snake_case__ , not is_finetuned )
hf_wavavec.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
_lowercase = parser.parse_args()
_lowercase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
) | 22 |
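# Illustrative sketch (an addition): the dotted-path traversal used by
# `set_recursively` above -- walk every attribute but the last with
# getattr, then assign with setattr. `Root`, `Leaf` and `set_by_path` are
# hypothetical names.
class Leaf:
    def __init__(self):
        self.weight = None

class Root:
    def __init__(self):
        self.leaf = Leaf()

def set_by_path(obj, path: str, value):
    *parents, last = path.split(".")
    for name in parents:
        obj = getattr(obj, name)
    setattr(obj, last, value)

root = Root()
set_by_path(root, "leaf.weight", 42)
print(root.leaf.weight)  # 42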
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase = True
except ImportError:
_lowercase = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( snake_case__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ ) | 22 | 1 |
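# The `replace_in_files` helper above is driven by marker comments inside the generated
# `to_replace_*.py` file. A hedged sketch of that format, reconstructed only from the
# string matches above ('# To replace in: ', '# Below: ', '# Replace with', '# End.');
# the target path and payload snippet are illustrative, not taken from a real template:
#
#   # To replace in: "src/transformers/models/auto/configuration_auto.py"
#   # Below: "# Add configs here"
#   # Replace with:
#   ("new-model", "NewModelConfig"),
#   # End.
#
# Each block's payload lines are copied into the target file directly under the first
# line that contains the `Below:` anchor text.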
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''LayoutLMv3FeatureExtractor''']
_lowercase = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 |
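# A hedged note on the pattern above: at import time only the import-structure mapping is
# built, and `_LazyModule` installs itself as the module object, resolving attributes on
# first access. Optional backends (tokenizers/torch/tf/vision) are therefore imported only
# when actually used. Illustrative sketch (module path assumed):
#
#   import transformers.models.layoutlmv3 as m   # cheap: no torch/tf import yet
#   m.LayoutLMv3Config                           # first access triggers the real import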
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str:
A = size if size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ImageGPTImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'clusters' ) )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
A = self.image_processing_class(**self.image_processor_dict )
A = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,obj[key] ) )
else:
self.assertEqual(obj[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,'image_processor.json' )
image_processor_first.to_json_file(A_ )
A = self.image_processing_class.from_json_file(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(A_ )
A = self.image_processing_class.from_pretrained(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
def _snake_case ( ):
A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
A = Image.open(dataset[4]['file'] )
A = Image.open(dataset[5]['file'] )
A = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
A = prepare_images()
# test non-batched
A = image_processing(images[0] ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
A = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ )
# test batched
A = image_processing(A_ ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
A = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ ) | 22 | 1 |
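# What the `clusters` in the tester above stand in for (a hedged, self-contained sketch,
# not the processor's actual implementation): ImageGPT color-quantizes every pixel to its
# nearest cluster centroid, and `input_ids` are the resulting palette indices.
import numpy as np
pixel_values = np.array([[0.9, 0.6, 0.4], [-0.6, 0.0, 0.5]])  # (n_pixels, 3), illustrative
palette = np.array([[0.89, 0.66, 0.39], [-0.60, -0.02, 0.54]])  # ~ the two clusters above
squared_dist = ((pixel_values[:, None, :] - palette[None, :, :]) ** 2).sum(-1)
assert squared_dist.argmin(axis=1).tolist() == [0, 1]  # each pixel maps to its own cluster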
"""simple docstring"""
def hubble_parameter ( hubble_constant : float , radiation_density : float , matter_density : float , dark_energy : float , redshift : float , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_lowercase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 22 |
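# Hedged sanity check of the function above: at redshift 0 with the relative densities
# summing to one, the curvature term vanishes, E(z) = 1, and H(0) is the Hubble constant.
assert abs(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=1 - 0.3 - 1e-4,
        redshift=0,
    )
    - 68.3
) < 1e-6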
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _snake_case ( snake_case__ : Optional[int] ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
download_parser.set_defaults(func=A_ )
def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]:
A = model
A = cache
A = force
A = trust_remote_code
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) | 22 | 1 |
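# Hedged CLI sketch: assuming the upstream `transformers-cli` entry point wires this
# subcommand in, the flags below are exactly the ones registered above ('download',
# positional `model`, `--cache-dir`, `--force`, `--trust-remote-code`); the model name
# is illustrative:
#
#   $ transformers-cli download bert-base-uncased --cache-dir ./models --force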
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spm_char.model'''}
_lowercase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
_lowercase = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]:
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
A = [1]
if token_ids_a is None:
return ([0] * len(A_ )) + suffix_ones
return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,) | 22 | 1 |
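# Minimal usage sketch. The upstream public name of the class above is presumably
# SpeechT5Tokenizer (an assumption); the checkpoint id comes straight from the
# pretrained map at the top of this file:
#
#   from transformers import SpeechT5Tokenizer
#   tok = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_tts')
#   ids = tok('Hello world').input_ids   # character-level pieces with </s> appended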
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance ( emb_a , emb_b , eps=1e-12 ):
    # row-normalize both embedding matrices, then take all pairwise dot products
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
_lowerCamelCase: CLIPConfig
_lowerCamelCase: jnp.dtype = jnp.floataa
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
A = FlaxCLIPVisionModule(self.config.vision_config )
A = nn.Dense(self.config.projection_dim ,use_bias=A_ ,dtype=self.dtype )
A = self.param('concept_embeds' ,jax.nn.initializers.ones ,(17, self.config.projection_dim) )
A = self.param(
'special_care_embeds' ,jax.nn.initializers.ones ,(3, self.config.projection_dim) )
A = self.param('concept_embeds_weights' ,jax.nn.initializers.ones ,(17,) )
A = self.param('special_care_embeds_weights' ,jax.nn.initializers.ones ,(3,) )
def __call__( self : Optional[int] ,A_ : str ) -> Optional[Any]:
A = self.vision_model(A_ )[1]
A = self.visual_projection(A_ )
A = jax_cosine_distance(A_ ,self.special_care_embeds )
A = jax_cosine_distance(A_ ,self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
A = 0.0
A = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
A = jnp.round(A_ ,3 )
A = jnp.any(special_scores > 0 ,axis=1 ,keepdims=A_ )
# Use a lower threshold if an image has any special care concept
A = is_special_care * 0.01
A = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
A = jnp.round(A_ ,3 )
A = jnp.any(concept_scores > 0 ,axis=1 )
return has_nsfw_concepts
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = CLIPConfig
_lowerCamelCase: Dict = '''clip_input'''
_lowerCamelCase: str = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] ,A_ : CLIPConfig ,A_ : Optional[Tuple] = None ,A_ : int = 0 ,A_ : jnp.dtype = jnp.floataa ,A_ : bool = True ,**A_ : str ,) -> int:
if input_shape is None:
A = (1, 224, 224, 3)
A = self.module_class(config=A_ ,dtype=A_ ,**A_ )
super().__init__(A_ ,A_ ,input_shape=A_ ,seed=A_ ,dtype=A_ ,_do_init=_do_init )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : jax.random.KeyArray ,A_ : Tuple ,A_ : FrozenDict = None ) -> FrozenDict:
# init input tensor
A = jax.random.normal(A_ ,A_ )
A , A = jax.random.split(A_ )
A = {'params': params_rng, 'dropout': dropout_rng}
A = self.module.init(A_ ,A_ )['params']
return random_params
def __call__( self : str ,A_ : Optional[Any] ,A_ : dict = None ,) -> List[str]:
A = jnp.transpose(A_ ,(0, 2, 3, 1) )
return self.module.apply(
{'params': params or self.params} ,jnp.array(A_ ,dtype=jnp.floataa ) ,rngs={} ,) | 22 |
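# Quick numeric check of `jax_cosine_distance` above (illustrative values; relies on the
# module-level `jnp` import): parallel rows score 1.0 and orthogonal rows score 0.0.
_sim = jax_cosine_distance(jnp.array([[1.0, 0.0], [0.0, 2.0]] ) , jnp.array([[1.0, 0.0]] ) )
# _sim has shape (2, 1) and is approximately [[1.0], [0.0]]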
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPFeatureExtractor''']
_lowercase = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowercase = logging.getLogger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = '''sequence-classification'''
def __init__( self : Any ,A_ : Union[str, Any] ) -> List[Any]:
if type(A_ ) == dict:
A = Namespace(**A_ )
A = glue_output_modes[hparams.task]
A = glue_tasks_num_labels[hparams.task]
super().__init__(A_ ,A_ ,self.mode )
def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : Dict ) -> str:
return self.model(**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[Any] ) -> List[Any]:
A = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
A = self(**A_ )
A = outputs[0]
A = self.trainer.lr_schedulers[0]['scheduler']
A = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
A = self.hparams
A = processors[args.task]()
A = processor.get_labels()
for mode in ["train", "dev"]:
A = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' ,A_ )
else:
logger.info('Creating features from dataset file at %s' ,args.data_dir )
A = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
A = convert_examples_to_features(
A_ ,self.tokenizer ,max_length=args.max_seq_length ,label_list=self.labels ,output_mode=args.glue_output_mode ,)
logger.info('Saving features into cached file %s' ,A_ )
torch.save(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : str ,A_ : int ,A_ : bool = False ) -> DataLoader:
A = 'dev' if mode == 'test' else mode
A = self._feature_file(A_ )
logger.info('Loading features from cached file %s' ,A_ )
A = torch.load(A_ )
A = torch.tensor([f.input_ids for f in features] ,dtype=torch.long )
A = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long )
A = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
A = torch.tensor([f.label for f in features] ,dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
A = torch.tensor([f.label for f in features] ,dtype=torch.float )
return DataLoader(
TensorDataset(A_ ,A_ ,A_ ,A_ ) ,batch_size=A_ ,shuffle=A_ ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ,A_ : Optional[Any] ) -> Optional[int]:
A = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
A = self(**A_ )
A , A = outputs[:2]
A = logits.detach().cpu().numpy()
A = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ) -> tuple:
A = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
A = np.concatenate([x['pred'] for x in outputs] ,axis=0 )
if self.hparams.glue_output_mode == "classification":
A = np.argmax(A_ ,axis=1 )
elif self.hparams.glue_output_mode == "regression":
A = np.squeeze(A_ )
A = np.concatenate([x['target'] for x in outputs] ,axis=0 )
A = [[] for _ in range(out_label_ids.shape[0] )]
A = [[] for _ in range(out_label_ids.shape[0] )]
A = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task ,A_ ,A_ )}
A = dict(results.items() )
A = results
return ret, preds_list, out_label_list
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : list ) -> dict:
A , A , A = self._eval_end(A_ )
A = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ) -> dict:
A , A , A = self._eval_end(A_ )
A = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : int ,A_ : Optional[int] ) -> int:
BaseTransformer.add_model_specific_args(A_ ,A_ )
parser.add_argument(
'--max_seq_length' ,default=128 ,type=A_ ,help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) ,)
parser.add_argument(
'--task' ,default='' ,type=A_ ,required=A_ ,help='The GLUE task to run' ,)
parser.add_argument(
            '--gpus' ,default=0 ,type=A_ ,help='The number of GPUs allocated for this; defaults to 0, meaning none' ,)
parser.add_argument(
'--overwrite_cache' ,action='store_true' ,help='Overwrite the cached training and evaluation sets' )
return parser
def _snake_case ( ):
A = argparse.ArgumentParser()
add_generic_args(snake_case__ , os.getcwd() )
A = GLUETransformer.add_model_specific_args(snake_case__ , os.getcwd() )
A = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
A = os.path.join(
'./results' , F'{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}' , )
os.makedirs(args.output_dir )
A = GLUETransformer(snake_case__ )
A = generic_train(snake_case__ , snake_case__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
A = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=snake_case__ ) )
A = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(snake_case__ )
if __name__ == "__main__":
main() | 22 |
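# Hedged invocation sketch. `--task`, `--max_seq_length`, `--gpus` and `--overwrite_cache`
# are registered above; `--output_dir`, `--do_predict` and the model/data flags come from
# `add_generic_args`/`BaseTransformer` (not shown here), so treat their exact spelling as
# an assumption:
#
#   $ python run_pl_glue.py --task mrpc --max_seq_length 128 --gpus 1 --output_dir ./results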
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.prepare_config_and_inputs()
        ( A , A , A , A , A , A , A , A , A ) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
    ) # TODO (PVP): check whether language generation is also applicable to other models
_lowerCamelCase: Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 22 | 1 |
"""simple docstring"""
import numpy
# List of input, output pairs
_lowercase = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_lowercase = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
_lowercase = [2, 4, 1, 5]
_lowercase = len(train_data)
_lowercase = 0.009
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : str="train" ):
return calculate_hypothesis_value(snake_case__ , snake_case__ ) - output(
snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : Optional[int] ):
A = 0
    for i in range(len(parameter_vector ) - 1 ):  # one weight per input feature
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
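# Worked example for the hypothesis above with parameter_vector = [2, 4, 1, 5] and the
# first training input (5, 2, 3):
#   hyp = 2 + 4*5 + 1*2 + 5*3 = 39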
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : List[str] ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _snake_case ( snake_case__ : Any , snake_case__ : List[str] ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _snake_case ( snake_case__ : Tuple , snake_case__ : Any=m ):
A = 0
for i in range(snake_case__ ):
if index == -1:
summation_value += _error(snake_case__ )
else:
summation_value += _error(snake_case__ ) * train_data[i][0][index]
return summation_value
def _snake_case ( snake_case__ : Optional[int] ):
A = summation_of_cost_derivative(snake_case__ , snake_case__ ) / m
return cost_derivative_value
def _snake_case ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
A = 0.000002
A = 0
A = 0
while True:
j += 1
A = [0, 0, 0, 0]
for i in range(0 , len(snake_case__ ) ):
A = get_cost_derivative(i - 1 )
A = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
snake_case__ , snake_case__ , atol=snake_case__ , rtol=snake_case__ , ):
break
A = temp_parameter_vector
print(('Number of iterations:', j) )
def _snake_case ( ):
for i in range(len(snake_case__ ) ):
print(('Actual output value:', output(snake_case__ , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(snake_case__ , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent() | 22 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
def decimal_to_bits ( x , bits=BITS ):
    # converts an image tensor with values in [0, 1] to per-channel bit planes in {-1, 1}
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )  # quantize to 8-bit integers
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )  # bit weights 128 .. 1
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b c h w -> b c 1 h w' )
    bits = ((x & mask) != 0).float()  # extract each bit as 0/1
    bits = rearrange(bits , 'b c d h w -> b (c d) h w' )  # fold the bit axis into channels
    bits = bits * 2 - 1  # map {0, 1} -> {-1, 1}
    return bits
def bits_to_decimal ( x , bits=BITS ):
    # inverse of decimal_to_bits: collapse {-1, 1} bit planes back to an image in [0, 1]
    device = x.device
    x = (x > 0).int()  # threshold back to {0, 1} bits
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )  # weights 128 .. 1
    mask = rearrange(mask , 'd -> d 1 1' )
    x = rearrange(x , 'b (c d) h w -> b c d h w' , d=8 )  # unfold the bit axis from channels
    dec = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )  # weighted sum -> integers 0 .. 255
    return (dec / 255).clamp(0.0 , 1.0 )
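# Hedged round-trip check for the two helpers above: encoding to bit planes and decoding
# back reproduces the 8-bit quantization of the input (the shape is illustrative).
_x = torch.rand(1 , 3 , 8 , 8 )
assert torch.allclose(bits_to_decimal(decimal_to_bits(_x ) ) , (_x * 255).int() / 255 )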
def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
    # Notation: <variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
A = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
A = self.alphas_cumprod[timestep]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
A = self._get_variance(snake_case__ , snake_case__ )
A = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu'
A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ )
A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise
A = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 )
else:
A = None
# 1. compute alphas, betas
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[t - 1] if t > 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
A = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A = 0
if t > 0:
A = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device )
A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]:
super().__init__()
A = bit_scale
A = (
ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]:
A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,)
A = decimal_to_bits(A_ ) * self.bit_scale
A = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A = self.unet(A_ ,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample
A = bits_to_decimal(A_ )
if output_type == "pil":
A = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ ) | 22 | 1 |
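# Hedged end-to-end sketch for the pipeline above. The checkpoint path is illustrative and
# the constructor/call arguments are passed positionally because the parameter names here
# are mangled (see the `__init__` / `__call__` signatures):
#
#   unet = UNetaDConditionModel.from_pretrained('path/to/bit-diffusion-unet')
#   pipe = lowerCAmelCase_(unet, DDIMScheduler(), 1.0)
#   image = pipe(64, 64, 10).images[0]   # height, width, num_inference_steps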
"""simple docstring"""
import base64
def baseaa_encode ( string : str ):
    return base64.b85encode(string.encode('utf-8' ) )
def baseaa_decode ( a_encoded : bytes ):
    return base64.b85decode(a_encoded ).decode('utf-8' )
if __name__ == "__main__":
_lowercase = '''Hello World!'''
_lowercase = baseaa_encode(test)
print(encoded)
_lowercase = baseaa_decode(encoded)
print(decoded) | 22 |
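# Round-trip property of the two helpers above (Base85 via the stdlib):
assert baseaa_decode(baseaa_encode('Hello World!' ) ) == 'Hello World!'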
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''yolos'''
def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return 12 | 22 | 1 |
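For orientation, this is how a vision config like the one above is typically exercised. The sketch assumes the obfuscated class corresponds to transformers' YolosConfig, so treat the import path and keyword name as assumptions.

from transformers import YolosConfig

config = YolosConfig(num_detection_tokens=50)  # override one default
print(config.num_detection_tokens)             # 50
print(config.to_json_string()[:80])            # JSON form used by save_pretrained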
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''dpr'''
def __init__( self : Tuple ,A_ : int=3_0522 ,A_ : int=768 ,A_ : Any=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Optional[Any]="gelu" ,A_ : List[Any]=0.1 ,A_ : List[Any]=0.1 ,A_ : int=512 ,A_ : Union[str, Any]=2 ,A_ : Optional[int]=0.02 ,A_ : List[Any]=1e-12 ,A_ : str=0 ,A_ : str="absolute" ,A_ : int = 0 ,**A_ : Union[str, Any] ,) -> Optional[Any]:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = projection_dim
A = position_embedding_type | 22 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 22 | 1 |
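Every test above follows the same pattern: flatten a 3x3 corner slice of the output image and compare it elementwise to a hard-coded reference within a tolerance. A self-contained illustration with synthetic data (the arrays here are stand-ins, not real pipeline output):

import numpy as np

image = np.random.RandomState(0).rand(1, 128, 128, 3)  # stand-in for pipe(...).images
image_slice = image[0, -3:, -3:, -1].flatten()
expected_slice = image_slice.copy()  # real tests hard-code this reference array
assert image.shape == (1, 128, 128, 3)
assert np.abs(image_slice - expected_slice).max() < 1e-1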
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lowercase = threading.Lock()
_lowercase = None
_lowercase = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_lowercase = logging.WARNING
_lowercase = True
def _snake_case ( ):
A = os.getenv('TRANSFORMERS_VERBOSITY' , snake_case__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def _snake_case ( ):
return __name__.split('.' )[0]
def _snake_case ( ):
return logging.getLogger(_get_library_name() )
def _snake_case ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
A = logging.StreamHandler() # Set sys.stderr as stream.
A = sys.stderr.flush
# Apply our default configuration to the library root logger.
A = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
A = False
def _snake_case ( ):
global _default_handler
with _lock:
if not _default_handler:
return
A = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
A = None
def _snake_case ( ):
return log_levels
def _snake_case ( snake_case__ : Optional[str] = None ):
if name is None:
A = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(snake_case__ )
def _snake_case ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _snake_case ( snake_case__ : int ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(snake_case__ )
def _snake_case ( ):
return set_verbosity(snake_case__ )
def _snake_case ( ):
return set_verbosity(snake_case__ )
def _snake_case ( ):
return set_verbosity(snake_case__ )
def _snake_case ( ):
return set_verbosity(snake_case__ )
def _snake_case ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _snake_case ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _snake_case ( snake_case__ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(snake_case__ )
def _snake_case ( snake_case__ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(snake_case__ )
def _snake_case ( ):
_configure_library_root_logger()
A = False
def _snake_case ( ):
_configure_library_root_logger()
A = True
def _snake_case ( ):
A = _get_library_root_logger().handlers
for handler in handlers:
A = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
handler.setFormatter(snake_case__ )
def _snake_case ( ):
A = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(snake_case__ )
def _snake_case ( self : str , *snake_case__ : int , **snake_case__ : Tuple ):
A = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , snake_case__ )
if no_advisory_warnings:
return
self.warning(*snake_case__ , **snake_case__ )
_lowercase = warning_advice
@functools.lru_cache(snake_case__ )
def _snake_case ( self : Optional[Any] , *snake_case__ : List[Any] , **snake_case__ : Union[str, Any] ):
self.warning(*snake_case__ , **snake_case__ )
_lowercase = warning_once
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : str ,*A_ : List[str] ,**A_ : Union[str, Any] ) -> Dict: # pylint: disable=unused-argument
A = args[0] if args else None
def __iter__( self : List[Any] ) -> Union[str, Any]:
return iter(self._iterator )
def __getattr__( self : Dict ,A_ : Dict ) -> str:
def empty_fn(*A_ : Dict ,**A_ : Optional[Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : int ) -> Union[str, Any]:
return self
def __exit__( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Dict ) -> Tuple:
return
class lowerCAmelCase_ :
'''simple docstring'''
def __call__( self : Optional[int] ,*A_ : Any ,**A_ : Dict ) -> List[str]:
if _tqdm_active:
return tqdm_lib.tqdm(*A_ ,**A_ )
else:
return EmptyTqdm(*A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[int] ,**A_ : Tuple ) -> Any:
A = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowercase = _tqdm_cls()
def _snake_case ( ):
global _tqdm_active
return bool(_tqdm_active )
def _snake_case ( ):
global _tqdm_active
A = True
hf_hub_utils.enable_progress_bars()
def _snake_case ( ):
global _tqdm_active
A = False
hf_hub_utils.disable_progress_bars() | 22 |
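The module above wraps the stdlib pattern of one configurable root logger per library, with child loggers propagating up the dotted name hierarchy. A minimal stdlib-only sketch of that idea; the logger names are illustrative:

import logging
import sys

handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(
    logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s'))
root = logging.getLogger('mylib')  # hypothetical library root logger
root.addHandler(handler)
root.setLevel(logging.WARNING)

# child loggers propagate records to the library root's handler
logging.getLogger('mylib.submodule').warning('reaches the root handler')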
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 | 1 |
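Numerically, the preprocess path above is resize -> center crop -> rescale -> normalize -> channels-first. A dependency-light sketch of those steps on a random HWC image; the real class resizes the shortest edge with proper interpolation, which is skipped here by starting from a 256x256 input:

import numpy as np

image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
crop = image[16:240, 16:240, :]               # 224x224 center crop
scaled = crop.astype(np.float32) * (1 / 255)  # rescale_factor
mean = np.array([0.5, 0.5, 0.5])              # IMAGENET_STANDARD_MEAN
std = np.array([0.5, 0.5, 0.5])               # IMAGENET_STANDARD_STD
pixel_values = ((scaled - mean) / std).transpose(2, 0, 1)  # ChannelDimension.FIRST
print(pixel_values.shape)                     # (3, 224, 224)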
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = '''AutoTokenizer'''
_lowerCamelCase: Any = ['''tokenizer''']
_lowerCamelCase: Any = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self : str ,A_ : List[Any] ,A_ : List[str]=None ) -> int:
super().__init__(A_ )
A = speaker_embeddings
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,A_ : int ,A_ : Any="speaker_embeddings_path.json" ,**A_ : Dict ) -> Union[str, Any]:
if speaker_embeddings_dict_path is not None:
A = get_file_from_repo(
A_ ,A_ ,subfolder=kwargs.pop('subfolder' ,A_ ) ,cache_dir=kwargs.pop('cache_dir' ,A_ ) ,force_download=kwargs.pop('force_download' ,A_ ) ,proxies=kwargs.pop('proxies' ,A_ ) ,resume_download=kwargs.pop('resume_download' ,A_ ) ,local_files_only=kwargs.pop('local_files_only' ,A_ ) ,use_auth_token=kwargs.pop('use_auth_token' ,A_ ) ,revision=kwargs.pop('revision' ,A_ ) ,)
if speaker_embeddings_path is None:
logger.warning(
                F'`{os.path.join(A_ ,A_ )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
A = None
else:
with open(A_ ) as speaker_embeddings_json:
A = json.load(A_ )
else:
A = None
A = AutoTokenizer.from_pretrained(A_ ,**A_ )
return cls(tokenizer=A_ ,speaker_embeddings=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[int] ,A_ : Dict="speaker_embeddings_path.json" ,A_ : Optional[int]="speaker_embeddings" ,A_ : bool = False ,**A_ : Optional[int] ,) -> int:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(A_ ,A_ ,'v2' ) ,exist_ok=A_ )
A = {}
A = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
A = self._load_voice_preset(A_ )
A = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,A_ ,F'{prompt_key}_{key}' ) ,voice_preset[key] ,allow_pickle=A_ ,)
A = os.path.join(A_ ,F'{prompt_key}_{key}.npy' )
A = tmp_dict
with open(os.path.join(A_ ,A_ ) ,'w' ) as fp:
json.dump(A_ ,A_ )
super().save_pretrained(A_ ,A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : str = None ,**A_ : List[str] ) -> List[Any]:
A = self.speaker_embeddings[voice_preset]
A = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
A = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,A_ ) ,cache_dir=kwargs.pop('cache_dir' ,A_ ) ,force_download=kwargs.pop('force_download' ,A_ ) ,proxies=kwargs.pop('proxies' ,A_ ) ,resume_download=kwargs.pop('resume_download' ,A_ ) ,local_files_only=kwargs.pop('local_files_only' ,A_ ) ,use_auth_token=kwargs.pop('use_auth_token' ,A_ ) ,revision=kwargs.pop('revision' ,A_ ) ,)
if path is None:
raise ValueError(
                F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
A = np.load(A_ )
return voice_preset_dict
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[dict] = None ) -> Optional[Any]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self : Any ,A_ : Union[str, Any]=None ,A_ : Optional[Any]=None ,A_ : Optional[int]="pt" ,A_ : Optional[int]=256 ,A_ : str=False ,A_ : List[Any]=True ,A_ : Tuple=False ,**A_ : Union[str, Any] ,) -> Tuple:
if voice_preset is not None and not isinstance(A_ ,A_ ):
if (
isinstance(A_ ,A_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
A = self._load_voice_preset(A_ )
else:
if isinstance(A_ ,A_ ) and not voice_preset.endswith('.npz' ):
A = voice_preset + '.npz'
A = np.load(A_ )
if voice_preset is not None:
self._validate_voice_preset_dict(A_ ,**A_ )
A = BatchFeature(data=A_ ,tensor_type=A_ )
A = self.tokenizer(
A_ ,return_tensors=A_ ,padding='max_length' ,max_length=A_ ,return_attention_mask=A_ ,return_token_type_ids=A_ ,add_special_tokens=A_ ,**A_ ,)
if voice_preset is not None:
A = voice_preset
return encoded_text | 22 |
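The voice-preset validation above boils down to checking that a dict carries three numpy arrays with fixed dimensionalities. The same check in isolation, with dummy arrays standing in for real speaker embeddings:

import numpy as np

preset_shape = {'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2}
voice_preset = {
    'semantic_prompt': np.zeros(10),
    'coarse_prompt': np.zeros((2, 10)),
    'fine_prompt': np.zeros((2, 10)),
}
for key, ndim in preset_shape.items():
    if key not in voice_preset:
        raise ValueError(f'Voice preset unrecognized, missing {key} as a key.')
    if voice_preset[key].ndim != ndim:
        raise ValueError(f'{key} voice preset must be a {ndim}D ndarray.')
print('preset ok')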
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 1 |
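Stripped of the TransfoXL specifics, the script ends by writing the two standard artifacts: the weights via torch.save of a state_dict and the config as JSON. A minimal sketch of that output step with a toy module and a hand-rolled config dict standing in for config.to_json_string():

import json
import os
import tempfile

import torch

model = torch.nn.Linear(4, 2)                   # stand-in model
config = {'in_features': 4, 'out_features': 2}  # stand-in config

with tempfile.TemporaryDirectory() as folder:
    torch.save(model.state_dict(), os.path.join(folder, 'pytorch_model.bin'))
    with open(os.path.join(folder, 'config.json'), 'w', encoding='utf-8') as f:
        json.dump(config, f)
    print(sorted(os.listdir(folder)))           # ['config.json', 'pytorch_model.bin']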
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
_lowercase = '''docs/source/en/_toctree.yml'''
def _snake_case ( snake_case__ : List[Any] ):
A = defaultdict(snake_case__ )
for doc in model_doc:
counts[doc["local"]] += 1
A = [key for key, value in counts.items() if value > 1]
A = []
for duplicate_key in duplicates:
A = list({doc['title'] for doc in model_doc if doc['local'] == duplicate_key} )
if len(snake_case__ ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['local']] == 1] )
# Sort
    return sorted(snake_case__ , key=lambda snake_case__ : snake_case__["title"].lower() )
def _snake_case ( snake_case__ : List[str]=False ):
with open(snake_case__ , encoding='utf-8' ) as f:
A = yaml.safe_load(f.read() )
# Get to the API doc
A = 0
while content[api_idx]["title"] != "API":
api_idx += 1
A = content[api_idx]['sections']
# Then to the model doc
A = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
A = api_doc[model_idx]['sections']
A = [(idx, section) for idx, section in enumerate(snake_case__ ) if 'sections' in section]
A = False
for idx, modality_doc in modalities_docs:
A = modality_doc['sections']
A = clean_model_doc_toc(snake_case__ )
if old_modality_doc != new_modality_doc:
A = True
if overwrite:
A = new_modality_doc
if diff:
if overwrite:
A = model_doc
A = api_doc
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(snake_case__ , allow_unicode=snake_case__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase = parser.parse_args()
check_model_doc(args.fix_and_overwrite) | 22 |
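The clean-up above collapses entries that share the same 'local' key and then sorts by lowercased title. The same logic on a toy table of contents, written without the defaultdict bookkeeping:

toy_doc = [
    {'local': 'model_doc/bert', 'title': 'BERT'},
    {'local': 'model_doc/bert', 'title': 'BERT'},
    {'local': 'model_doc/albert', 'title': 'ALBERT'},
]
seen = set()
deduped = []
for doc in toy_doc:
    if doc['local'] not in seen:
        seen.add(doc['local'])
        deduped.append(doc)
print(sorted(deduped, key=lambda d: d['title'].lower()))  # ALBERT first, one BERT entry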
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
# check if the u exists
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 | 1 |
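Both classes above store adjacency as {node: [[weight, neighbor], ...]}. A compact, standalone breadth-first search over that representation, mirroring what the bfs methods do with their deque:

from collections import deque

graph = {0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [[1, 3]], 3: []}

def bfs(start):
    visited = [start]
    d = deque([start])
    while d:
        node = d.popleft()
        for _weight, neighbor in graph[node]:
            if neighbor not in visited:
                visited.append(neighbor)
                d.append(neighbor)
    return visited

print(bfs(0))  # [0, 1, 2, 3]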
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
try:
with open(snake_case__ , 'rb' ) as flax_state_f:
A = from_bytes(snake_case__ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(snake_case__ ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    A = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : snake_case__.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
A = jax.tree_util.tree_map(
        lambda snake_case__ : snake_case__.astype(np.floataa ) if snake_case__.dtype == jnp.bfloataa else snake_case__ , snake_case__ )
A = ''
A = flatten_dict(snake_case__ , sep='.' )
A = pt_model.state_dict()
# keep track of unexpected & missing keys
A = []
A = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
A = flax_key_tuple_array[:-1] + ['weight']
A = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
A = flax_key_tuple_array[:-1] + ['weight']
A = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
A = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(snake_case__ ):
A = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
A = '.'.join(snake_case__ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
A = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
A = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
A = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
return pt_model | 22 |
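The renaming loop above applies two layout rules when moving Flax kernels into PyTorch weights: 4D conv kernels go from HWIO to OIHW, and 2D dense kernels are transposed. The same rules on plain NumPy arrays of assumed shapes:

import numpy as np

conv_kernel = np.zeros((3, 3, 16, 32))  # Flax conv: (H, W, in, out)
dense_kernel = np.zeros((128, 64))      # Flax dense: (in, out)

conv_weight = np.transpose(conv_kernel, (3, 2, 0, 1))  # PyTorch: (out, in, H, W)
dense_weight = dense_kernel.T                           # PyTorch: (out, in)
print(conv_weight.shape, dense_weight.shape)            # (32, 16, 3, 3) (64, 128)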
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
    A = snake_case__.strip().strip('/' )  # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
        A = F'{snake_case__} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 1 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _snake_case ( snake_case__ : str = "AAPL" ):
    A = F'https://in.finance.yahoo.com/quote/{snake_case__}?s={snake_case__}'
A = BeautifulSoup(requests.get(snake_case__ ).text , 'html.parser' )
A = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""") | 22 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
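The try/except blocks above gate optional exports on importability. That guard pattern in isolation, with PIL as the example optional dependency:

try:
    import PIL  # noqa: F401

    _vision_available = True
except ImportError:
    _vision_available = False

print('vision extras enabled' if _vision_available else 'vision extras skipped')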
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Union[str, Any] ,A_ : Any=None ,A_ : Optional[Any]=True ,A_ : List[str]=None ,**A_ : str ) -> str:
A = parent
A = config_class
A = has_text_modality
A = kwargs
A = common_properties
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
A = self.config_class(**self.inputs_dict )
A = (
['hidden_size', 'num_attention_heads', 'num_hidden_layers']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['vocab_size'] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(A_ ,A_ ) ,msg=F'`{prop}` does not exist' )
# Test that config has the common properties as setter
for idx, name in enumerate(A_ ):
try:
setattr(A_ ,A_ ,A_ )
self.parent.assertEqual(
getattr(A_ ,A_ ) ,A_ ,msg=F'`{name} value {idx} expected, but was {getattr(A_ ,A_ )}' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(A_ ):
try:
A = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(A_ ,A_ ) ,A_ ,msg=F'`{name} value {idx} expected, but was {getattr(A_ ,A_ )}' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
A = self.config_class(**self.inputs_dict )
A = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,'config.json' )
config_first.to_json_file(A_ )
A = self.config_class.from_json_file(A_ )
self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
A = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(A_ )
A = self.config_class.from_pretrained(A_ )
self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = self.config_class(**self.inputs_dict )
A = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,A_ )
config_first.save_pretrained(A_ )
A = self.config_class.from_pretrained(A_ ,subfolder=A_ )
self.parent.assertEqual(config_second.to_dict() ,config_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
A = self.config_class(**self.inputs_dict ,num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) ,5 )
self.parent.assertEqual(len(config.labelaid ) ,5 )
A = 3
self.parent.assertEqual(len(config.idalabel ) ,3 )
self.parent.assertEqual(len(config.labelaid ) ,3 )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
if self.config_class.is_composition:
return
A = self.config_class()
self.parent.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = copy.deepcopy(A_ )
A = self.config_class(**A_ )
A = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
elif getattr(A_ ,A_ ) != value:
wrong_values.append((key, getattr(A_ ,A_ ), value) )
if len(A_ ) > 0:
A = '\n'.join([F'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] )
raise ValueError(F'The following keys were not properly set in the config:\n{errors}' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init() | 22 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
    A = snake_case__.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def rename_key ( name : str ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
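# Worked example of the renaming above, traced by hand for a non-head key:
#   'layers.0.blocks.0.attn.proj.weight'
#     -> 'encoder.layers.0.blocks.0.attn.proj.weight'               ("layers" rule)
#     -> 'encoder.layers.0.blocks.0.attention.output.dense.weight'  ("attn.proj" rule)
#     -> 'swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight'  (no "head" -> prefix)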
def convert_state_dict ( orig_state_dict : dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = F'swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self'
            if "weight" in key:
                orig_state_dict[F'{prefix}.query.weight'] = val[:dim, :]
                orig_state_dict[F'{prefix}.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[F'{prefix}.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'{prefix}.query.bias'] = val[:dim]
                orig_state_dict[F'{prefix}.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[F'{prefix}.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
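# Sketch of the qkv split handled above: timm fuses query/key/value into one tensor of
# shape (3 * dim, dim); rows [0:dim] are Q, [dim:2*dim] are K, [2*dim:3*dim] are V.
#     import torch
#     fused = torch.arange(12).reshape(6, 2).float()  # toy fused weight with dim = 2
#     dim = 2
#     q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]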
def convert_swinva_checkpoint ( swinva_name : str , pytorch_dump_folder_path : str ):
    timm_model = timm.create_model(swinva_name , pretrained=True )
    timm_model.eval()
    config = get_swinva_config(swinva_name )
    model = SwinvaForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='pt' )
    timm_outs = timm_model(inputs['pixel_values'] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )
    print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path , swinva_name ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 22 | 1 |
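# Example invocation (script filename and output path are hypothetical):
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256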
"""simple docstring"""
def method_a ( boundary : list , steps : float ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y
def make_points ( a : float , b : float , h : float ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f ( x : float ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main ():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(F'y = {y}' )
if __name__ == "__main__":
main() | 22 |
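# Quick sanity check (kept as a comment): for f(x) = x^2 on [0, 1] the exact integral
# is 1/3, and with 10 steps the extended trapezoidal rule above returns ~0.335.
#     assert abs(method_a([0.0, 1.0], 10.0) - 1 / 3) < 5e-3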
"""simple docstring"""
from math import pi, sqrt
def gamma ( num : float ):
    if num <= 0:
        raise ValueError('math domain error' )
    if num > 171.5:
        raise OverflowError('math range error' )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer' )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma ():
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 1 |
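# A few values the recursion above produces: gamma(3) == 2.0 (= 2!), and for
# half-integers it bottoms out at gamma(0.5) == sqrt(pi), e.g.
# gamma(2.5) == 1.5 * 0.5 * sqrt(pi) (~1.3293).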
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] ,generator=generator ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_euler' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] ,generator=generator ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type='np' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('sample_dpmpp_2m' )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] ,generator=generator ,guidance_scale=7.5 ,num_inference_steps=15 ,output_type='np' ,use_karras_sigmas=True ,)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
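# Minimal usage sketch of the pipeline exercised above (weights download on first run,
# GPU strongly recommended):
#     import torch
#     from diffusers import StableDiffusionKDiffusionPipeline
#     pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4').to('cuda')
#     pipe.set_scheduler('sample_euler')
#     image = pipe('A painting of a squirrel eating a burger',
#                  generator=torch.manual_seed(0), num_inference_steps=20).images[0]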
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 1 |
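# Sketch of the reparameterization implemented by the final class above (diffusers'
# DiagonalGaussianDistribution): sample() draws x = mean + std * eps with eps ~ N(0, I),
# and kl() is the closed form 0.5 * sum(mean^2 + var - 1 - logvar) against a standard
# normal. For example:
#     import torch
#     params = torch.randn(1, 8, 4, 4)   # chunked into mean / logvar along dim=1
#     dist = DiagonalGaussianDistribution(params)
#     z = dist.sample()                  # shape (1, 4, 4, 4)
#     kl = dist.kl()                     # shape (1,)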
"""simple docstring"""
def is_sum_subset ( arr : list[int] , required_sum : int ) -> bool:
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
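# Example: [3, 34, 4, 12, 5, 2] has a subset summing to 9 (4 + 5) but none summing to 30:
#     assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
#     assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)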
"""simple docstring"""
def neville_interpolate ( x_points : list , y_points : list , xa : int ):
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 1 |
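# Example: the points below lie on the line y = 2x, so interpolating at x = 5 is exact:
#     assert neville_interpolate([1, 2, 3, 4, 6], [2, 4, 6, 8, 12], 5)[0] == 10.0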
"""simple docstring"""
import requests
_lowercase = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def fetch_bbc_news ( bbc_news_api_key : str ):
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['articles'] , 1 ):
        print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''') | 22 |
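# Example (the key is a placeholder): fetch_bbc_news(bbc_news_api_key='<your-api-key>')
# prints a numbered list of current BBC headlines, one per line ("1.) <headline>").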
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 1 |
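# Minimal inference sketch for the checkpoint exercised above (class names follow this
# file's imports; weights download on first use):
#     processor = MaskaFormerImageProcessor.from_pretrained('facebook/mask2former-swin-small-coco-instance')
#     model = MaskaFormerForUniversalSegmentation.from_pretrained('facebook/mask2former-swin-small-coco-instance')
#     inputs = processor(images=prepare_img(), return_tensors='pt')
#     with torch.no_grad():
#         outputs = model(**inputs)
#     # per-query masks and class logits:
#     masks, classes = outputs.masks_queries_logits, outputs.class_queries_logits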
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env ( key , default=False ):
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'If set, {key} must be yes or no.' )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
_lowercase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
_lowercase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
_lowercase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
_lowercase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
_lowercase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
_lowercase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
_lowercase = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _snake_case ( test_case ):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss' )(test_case )
    return test_case
def _snake_case ( test_case ):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex' )(test_case )
    return test_case
def _snake_case ( test_case ):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch' )(test_case )
    return test_case
def _snake_case ( test_case ):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy' )(test_case )
    return test_case
def _snake_case ( test_case ):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch' )(test_case )
    return test_case
def _snake_case ( test_case ):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow' )(test_case )
    return test_case
def _snake_case ( test_case ):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX' )(test_case )
    return test_case
def _snake_case ( test_case ):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow' )(test_case )
    return test_case
def _snake_case ( test_case ):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers' )(test_case )
    else:
        return test_case
def _snake_case ( test_case ):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken' )(test_case )
    else:
        return test_case
def _snake_case ( test_case ):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy' )(test_case )
    else:
        return test_case
def _snake_case ( model ):
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip('test requires spacy' )(test_case )
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def _snake_case ( test_case ):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(test_case )
    else:
        return test_case
def _snake_case ( test_case ):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(test_case )
    else:
        return test_case
def _snake_case ( test_case ):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow' )(test_case )
    return test_case
def _snake_case ( test_case ):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local' )(test_case )
    return test_case
def _snake_case ( test_case ):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged' )(test_case )
    return test_case
def _snake_case ( test_case ):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote' )(test_case )
    return test_case
def _snake_case ( *decorators ):
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith('test' ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError ( Exception ):
    '''simple docstring'''
    pass
class OfflineSimulationMode ( Enum ):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def _snake_case ( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1e-16 ):
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
        kwargs['timeout'] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1' , F'OfflineMock[{url}]' ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send' , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request' , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE' , True ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
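# Typical use of the context manager above in a test body: each mode simulates a
# different offline failure. Under CONNECTION_FAILS every `requests` call raises
# ConnectionError; under CONNECTION_TIMES_OUT calls hang until the tiny default
# timeout fires; HF_DATASETS_OFFLINE_SET_TO_1 only flips the config flag.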
@contextmanager
def _snake_case ( *args , **kwargs ):
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def _snake_case ( ):
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _snake_case ( ):
import gc
gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _snake_case ( rng1 , rng2 ):
    return deepcopy(rng1 ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 100 , 10 ).tolist()
def _snake_case ( func ):
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith('500' ) or str(err ).startswith('502' ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
class _RunOutput :
    '''simple docstring'''
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream ( stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label='stdout:' ) ),
            _read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label='stderr:' ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            F'The combined stderr from workers follows:\n{stderr}' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F'\'{cmd_str}\' produced no output.' )
    return result
def pytest_xdist_worker_id ( ):
    # maps 'gw0', 'gw1', ... (or the default 'gw0' outside pytest-xdist) to 0, 1, ...
    worker = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
    worker = re.sub(r'^gw' , '' , worker , 0 , re.M )
    return int(worker )
def get_torch_dist_unique_port ( ):
    # 29500 is torch.distributed's default port; offset it per xdist worker
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
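# Example: under pytest-xdist, worker 'gw3' yields pytest_xdist_worker_id() == 3, so
# get_torch_dist_unique_port() == 29503 and concurrent torch.distributed test workers
# don't collide on the same port.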
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 1 |
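# Minimal masked-LM sketch for the checkpoint used above (a sketch, not part of the
# test suite; the protein sequence is arbitrary):
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained('facebook/esm2_t6_8M_UR50D')
#     model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
#     inputs = tokenizer('MKT<mask>ISFVKSHFSRQLEERLGLIEVQ', return_tensors='tf')
#     logits = model(**inputs).logits   # (batch, seq_len, vocab_size = 33)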
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        config = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
        self.assertIsInstance(config ,CLIPImageProcessor )
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(processor_tmpfile ,'w' ) ,)
            json.dump({'model_type': 'clip'} ,open(config_tmpfile ,'w' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config ,CLIPImageProcessor )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} ,open(processor_tmpfile ,'w' ) ,)
            json.dump({'model_type': 'clip'} ,open(config_tmpfile ,'w' ) )
            config = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(config ,CLIPImageProcessor )
    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(processor_tmpfile ,'w' ) ,)
            json.dump({'model_type': 'clip'} ,open(config_tmpfile ,'w' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()
            config_dict.pop('image_processor_type' )
            config = CLIPImageProcessor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
        self.assertIsInstance(image_processor ,CLIPImageProcessor )
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(processor_tmpfile ,'w' ) ,)
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor ,CLIPImageProcessor )
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
        with self.assertRaisesRegex(
            EnvironmentError ,'clip-base is not a local folder and is not a valid model identifier' ):
            image_processor = AutoImageProcessor.from_pretrained('clip-base' )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
        with self.assertRaisesRegex(
            EnvironmentError ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER ,revision='aaaaaa' )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        with self.assertRaisesRegex(
            EnvironmentError ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(A_ ):
A = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A_ ):
A = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=A_ )
A = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=A_ )
self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A_ )
A = AutoImageProcessor.from_pretrained(A_ ,trust_remote_code=A_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,'NewImageProcessor' )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
try:
AutoConfig.register('custom' ,A_ )
AutoImageProcessor.register(A_ ,A_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A_ ):
AutoImageProcessor.register(A_ ,A_ )
with tempfile.TemporaryDirectory() as tmpdirname:
A = Path(A_ ) / 'preprocessor_config.json'
A = Path(A_ ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} ,open(A_ ,'w' ) ,)
json.dump({'model_type': 'clip'} ,open(A_ ,'w' ) )
A = CustomImageProcessor.from_pretrained(A_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(A_ )
A = AutoImageProcessor.from_pretrained(A_ )
self.assertIsInstance(A_ ,A_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
        class NewImageProcessor(CLIPImageProcessor):
            '''simple docstring'''

            is_local = True
try:
AutoConfig.register('custom' ,A_ )
AutoImageProcessor.register(A_ ,A_ )
# If remote code is not set, the default is to use local
A = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
A = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=A_ )
self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
A = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=A_ )
self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' )
self.assertTrue(not hasattr(A_ ,'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] | 22 |
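# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite above): the plain Auto API
# path these tests exercise, assuming network access to the Hugging Face Hub;
# `pil_image` is a placeholder for any PIL.Image instance.
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   pixel_values = image_processor(images=pil_image, return_tensors="pt").pixel_values
# ---------------------------------------------------------------------------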
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
_lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
_lowercase = '''|'''.join(sys.argv[1:])
_lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""")
_lowercase = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''') | 22 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''

    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
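# ---------------------------------------------------------------------------
# Non-interactive sketch of the routine above (the graph literal is an
# illustrative assumption, not part of the original script): a 4-vertex
# weighted graph whose minimum spanning tree uses edges 0-1 (w=1), 1-3 (w=2)
# and 0-2 (w=3).
#
#   example_graph = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 3, 2), (0, 2, 3), (2, 3, 4)]:
#       example_graph[u].append([v, w])
#       example_graph[v].append([u, w])
#   print(prisms_algorithm(example_graph))  # [(0, 1), (1, 3), (0, 2)]
# ---------------------------------------------------------------------------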
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Dict ,A_ : List[Any] ,A_ : Any=False ,A_ : List[str]=True ,A_ : Tuple=True ,A_ : Optional[Any]="[CLS]" ,A_ : Any="[SEP]" ,A_ : Any="[UNK]" ,A_ : str="[SEP]" ,A_ : int="[PAD]" ,A_ : Optional[Any]="[CLS]" ,A_ : Dict="[MASK]" ,**A_ : Any ,) -> Any:
super().__init__(
do_lower_case=A_ ,remove_space=A_ ,keep_accents=A_ ,bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,pad_token=A_ ,cls_token=A_ ,mask_token=A_ ,**A_ ,)
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = spm.SentencePieceProcessor()
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return len(self.sp_model )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Dict:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[Any] ,A_ : str ) -> Dict:
A = d
A = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[Any] ,A_ : Optional[Any]=False ) -> Optional[int]:
A = self.sp_model.EncodeAsPieces(A_ )
return pieces
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ) -> Dict:
return self.sp_model.PieceToId(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Any ) -> Optional[Any]:
return self.sp_model.IdToPiece(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[Any] ) -> Optional[Any]:
A = self.sp_model.decode_pieces(A_ )
return out_string
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(A_ ) )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file ,A_ )
return (out_vocab_file,) | 22 |
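# ---------------------------------------------------------------------------
# Hedged usage sketch, assuming the class above corresponds to transformers'
# RemBertTokenizer and that the pretrained SentencePiece model is reachable:
#
#   from transformers import RemBertTokenizer
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   ids = tokenizer("Hello world").input_ids  # [CLS]/[SEP] ids added by build_inputs_with_special_tokens
# ---------------------------------------------------------------------------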
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ ) | 22 | 1 |
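# ---------------------------------------------------------------------------
# Illustrative invocations of the (deprecated) command registered above; the
# JSON path in the second line is a hypothetical example:
#
#   $ transformers-cli add-new-model
#   $ transformers-cli add-new-model --testing --testing_file tests/fixtures/add_new_model.json
# ---------------------------------------------------------------------------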
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
'''simple docstring'''
    model_type = "yolos"
def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse("1.11")
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return 12 | 22 |
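# ---------------------------------------------------------------------------
# Minimal sketch, assuming the configuration class above mirrors transformers'
# YolosConfig:
#
#   from transformers import YolosConfig
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   assert config.model_type == "yolos"
# ---------------------------------------------------------------------------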
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
'''simple docstring'''
def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str:
A = size if size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ImageGPTImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'clusters' ) )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
A = self.image_processing_class(**self.image_processor_dict )
A = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,obj[key] ) )
else:
self.assertEqual(obj[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,'image_processor.json' )
image_processor_first.to_json_file(A_ )
A = self.image_processing_class.from_json_file(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(A_ )
A = self.image_processing_class.from_pretrained(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
A = prepare_images()
# test non-batched
A = image_processing(images[0] ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
A = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ )
# test batched
A = image_processing(A_ ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
A = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ ) | 22 | 1 |
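# ---------------------------------------------------------------------------
# Hedged sketch of the processor outside the test harness, assuming Hub
# access; ImageGPT maps pixels to color-cluster token ids rather than to
# float tensors. `pil_image` is a placeholder for any PIL.Image instance.
#
#   from transformers import ImageGPTImageProcessor
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#   input_ids = processor(images=pil_image, return_tensors="pt").input_ids
# ---------------------------------------------------------------------------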
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean',
) )
rename_keys.append(
(
F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var',
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'transformer.encoder.layers.{i}.self_attn.out_proj.weight',
F'encoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'transformer.decoder.layers.{i}.self_attn.out_proj.weight',
F'decoder.layers.{i}.self_attn.out_proj.weight',
) )
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
) )
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
) )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 22 |
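# ---------------------------------------------------------------------------
# Example invocation (the script filename is an assumption):
#
#   $ python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub
# ---------------------------------------------------------------------------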
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory)
def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]:
A = model
A = cache
A = force
A = trust_remote_code
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) | 22 | 1 |
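# ---------------------------------------------------------------------------
# Example invocations of the command defined above:
#
#   $ transformers-cli download bert-base-uncased
#   $ transformers-cli download --cache-dir ./models --force bert-base-uncased
# ---------------------------------------------------------------------------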
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.time_input_dim
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
return 100
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
A = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
torch.manual_seed(0 )
A = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1005 ,)
A = MultilingualCLIP(A_ )
A = text_encoder.eval()
return text_encoder
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
A = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        A = UNet2DConditionModel(**A_ )
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
torch.manual_seed(0 )
A = VQModel(**self.dummy_movq_kwargs )
return model
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = self.dummy_text_encoder
A = self.dummy_tokenizer
A = self.dummy_unet
A = self.dummy_movq
A = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_00_85,
'beta_end': 0.0_12,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
A = DDIMScheduler(**A_ )
A = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Any ,A_ : Tuple=0 ) -> int:
A = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(A_ ) ).to(A_ )
A = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
A = floats_tensor((1, 3, 64, 64) ,rng=random.Random(A_ ) ).to(A_ )
A = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        A = Image.fromarray(np.uint8(A_ ) ).convert('RGB' ).resize((256, 256) )
if str(A_ ).startswith('mps' ):
A = torch.manual_seed(A_ )
else:
A = torch.Generator(device=A_ ).manual_seed(A_ )
A = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
A = 'cpu'
A = self.get_dummy_components()
A = self.pipeline_class(**A_ )
A = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
A = pipe(**self.get_dummy_inputs(A_ ) )
A = output.images
A = pipe(
**self.get_dummy_inputs(A_ ) ,return_dict=A_ ,)[0]
A = image[0, -3:, -3:, -1]
A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
A = 'A red cartoon frog, 4k'
A = KandinskyPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1-prior' ,torch_dtype=torch.float16 )
pipe_prior.to(A_ )
        A = KandinskyImg2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-1' ,torch_dtype=torch.float16 )
A = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
A = torch.Generator(device='cpu' ).manual_seed(0 )
A , A = pipe_prior(
A_ ,generator=A_ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
A = pipeline(
A_ ,image=A_ ,image_embeds=A_ ,negative_image_embeds=A_ ,generator=A_ ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 ,output_type='np' ,)
A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ ,A_ ) | 22 |
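# ---------------------------------------------------------------------------
# Condensed sketch of the two-stage pattern the slow test exercises (the prior
# pipeline produces image embeddings that condition the img2img pipeline);
# variable names here are illustrative:
#
#   image_embeds, negative_embeds = pipe_prior(prompt).to_tuple()
#   result = pipeline(prompt, image=init_image, image_embeds=image_embeds,
#                     negative_image_embeds=negative_embeds, strength=0.2)
# ---------------------------------------------------------------------------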
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]:
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
A = [1]
if token_ids_a is None:
return ([0] * len(A_ )) + suffix_ones
return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,) | 22 | 1 |
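# ---------------------------------------------------------------------------
# Hedged usage sketch, assuming the class above is transformers'
# SpeechT5Tokenizer and that the Hub checkpoint is reachable:
#
#   from transformers import SpeechT5Tokenizer
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   ids = tokenizer("Hello world").input_ids  # char-level pieces, </s> appended
# ---------------------------------------------------------------------------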
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
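# ---------------------------------------------------------------------------
# Worked example: the points below lie on the line y = x + 5, so any
# interpolating polynomial through them evaluates to 10.0 at x0 = 5.
#
# >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
# 10.0
# ---------------------------------------------------------------------------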
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
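# Rough illustration of the lazy-import behaviour this registration gives
# (a sketch, not part of the module): attribute access, not import, pays the cost.
#
# import transformers.models.clip as clip   # cheap: only the import structure loads
# model_cls = clip.CLIPModel                # _LazyModule imports modeling_clip (and torch) here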
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
_lowercase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : int = 101 ) -> Tuple:
A = length
def __len__( self : int ) -> Any:
return self.length
def __getitem__( self : Optional[int] ,A_ : List[Any] ) -> int:
return i
class lowerCAmelCase_ :
'''simple docstring'''
def __call__( self : str ,A_ : List[Any] ) -> Any:
return {"input_ids": torch.tensor(A_ ), "labels": torch.tensor(A_ )}
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] ) -> Any:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
A = nn.Linear(120 ,80 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tuple ,A_ : Tuple=None ) -> Optional[Any]:
if labels is not None:
return torch.tensor(0.0 ,device=input_ids.device ), input_ids
else:
return input_ids
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@require_torch_neuroncore
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
A = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
A = self.get_auto_remove_tmp_dir()
A = F'--output_dir {output_dir}'.split()
A = ['torchrun'] + distributed_args + args
execute_subprocess_async(A_ ,env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@require_torch_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
A = self.get_auto_remove_tmp_dir()
A = F'--output_dir {output_dir}'.split()
A = ['torchrun'] + distributed_args + args
execute_subprocess_async(A_ ,env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
_lowercase = HfArgumentParser((TrainingArguments,))
_lowercase = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
_lowercase = DummyDataset(dataset_length)
def _snake_case ( snake_case__ : EvalPrediction ):
A = list(range(len(snake_case__ ) ) )
A = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
_lowercase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
_lowercase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
_lowercase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
_lowercase = 2
_lowercase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
_lowercase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
_lowercase = None | 22 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 22 | 1 |
"""simple docstring"""
import re
def _snake_case ( snake_case__ : str ):
    return [char.split() for char in re.split(r'[^ a-z A-Z 0-9 \s]' , snake_case__ )]
def _snake_case ( snake_case__ : str ):
    A = split_input(snake_case__ )
return "".join(
[''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _snake_case ( snake_case__ : str , snake_case__ : bool , snake_case__ : str ):
try:
A = split_input(snake_case__ )
if upper:
A = ''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
A = ''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _snake_case ( snake_case__ : str ):
return to_simple_case(snake_case__ )
def _snake_case ( snake_case__ : str ):
try:
A = to_simple_case(snake_case__ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _snake_case ( snake_case__ : str , snake_case__ : bool ):
return to_complex_case(snake_case__ , snake_case__ , '_' )
def _snake_case ( snake_case__ : str , snake_case__ : bool ):
return to_complex_case(snake_case__ , snake_case__ , '-' )
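# Expected behaviour, sketched from the helper names referenced in the bodies above
# (the last two wrappers are presumably the upstream to_snake_case / to_kebab_case;
# those names are assumptions):
#
# to_simple_case("hello world")              -> "HelloWorld"
# to_complex_case("hello world", False, "_") -> "hello_world"
# to_complex_case("hello world", True, "-")  -> "HELLO-WORLD"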
if __name__ == "__main__":
__import__('''doctest''').testmod() | 22 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ):
A = x.device
A = (x * 255).int().clamp(0 , 255 )
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' )
A = ((x & mask) != 0).float()
A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' )
A = bits * 2 - 1
return bits
def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ):
A = x.device
A = (x > 0).int()
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 )
A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
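# Round-trip sketch for the two codecs above (the names decimal_to_bits /
# bits_to_decimal are taken from the call sites further down; shapes are examples).
# Encoding quantizes to 8-bit, so the round trip matches the quantized input:
#
# x = torch.rand(1, 3, 64, 64)               # images in [0, 1]
# bits = decimal_to_bits(x)                  # (1, 24, 64, 64), values in {-1, +1}
# assert torch.allclose(bits_to_decimal(bits), (x * 255).int().float() / 255)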
def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-detail understanding
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
A = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
A = self.alphas_cumprod[timestep]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
A = self._get_variance(snake_case__ , snake_case__ )
A = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu'
A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ )
A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise
A = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 )
else:
A = None
# 1. compute alphas, betas
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[t - 1] if t > 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
A = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A = 0
if t > 0:
A = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device )
A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]:
super().__init__()
A = bit_scale
A = (
ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]:
A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,)
A = decimal_to_bits(A_ ) * self.bit_scale
A = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A = self.unet(A_ ,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample
A = bits_to_decimal(A_ )
if output_type == "pil":
A = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ ) | 22 | 1 |
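# Hypothetical usage (a sketch; the pipeline class name and a trained checkpoint are
# assumptions, since bit diffusion has no canonical pretrained weights):
#
# unet = UNetaDConditionModel(...)  # trained to denoise the 8-bit "analog bits" above
# pipe = BitDiffusionPipeline(unet, DDIMScheduler(), bit_scale=1.0)
# image = pipe(height=256, width=256, num_inference_steps=50).images[0]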
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def _snake_case ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Any=5 ):
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('<mask>' ) == 1
A = torch.tensor(tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) ).unsqueeze(0 ) # Batch size 1
A = model(snake_case__ )[0] # The last hidden-state is the first element of the output tuple
A = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
A = logits[0, masked_index, :]
A = logits.softmax(dim=0 )
A , A = prob.topk(k=snake_case__ , dim=0 )
A = ' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(snake_case__ ) )] )
A = tokenizer.mask_token
A = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
A = predicted_token_bpe.replace('\u2581' , ' ' )
if " {0}".format(snake_case__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(' {0}'.format(snake_case__ ) , snake_case__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(snake_case__ , snake_case__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
_lowercase = CamembertTokenizer.from_pretrained('''camembert-base''')
_lowercase = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
_lowercase = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3)) | 22 |
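# fill_mask returns a list of (filled_sentence, probability, token) tuples; the exact
# tokens and scores depend on the checkpoint, so the values below are illustrative:
#
# [('Le camembert est délicieux :)', 0.49, 'délicieux'),
#  ('Le camembert est excellent :)', 0.10, 'excellent'),
#  ('Le camembert est succulent :)', 0.03, 'succulent')]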
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''yolos'''
def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return 12 | 22 | 1 |
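# The OnnxConfig above declares a single dynamic pixel_values input for the generic
# exporter; a sketch of the CLI invocation, assuming the export backend registers this
# config for YOLOS (the output path is an example):
#
# python -m transformers.onnx --model=hustvl/yolos-small onnx/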
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : PreTrainedTokenizer , snake_case__ : int , snake_case__ : Optional[int] = None , ):
A = {}
if train_file is not None:
A = [train_file]
if eval_file is not None:
A = [eval_file]
if test_file is not None:
A = [test_file]
A = datasets.load_dataset('csv' , data_files=snake_case__ )
A = list(ds[list(files.keys() )[0]].features.keys() )
A = features_name.pop(snake_case__ )
A = list(set(ds[list(files.keys() )[0]][label_name] ) )
A = {label: i for i, label in enumerate(snake_case__ )}
A = tokenizer.model_input_names
A = {}
if len(snake_case__ ) == 1:
for k in files.keys():
A = ds[k].map(
lambda snake_case__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding='max_length' ) , batched=snake_case__ , )
elif len(snake_case__ ) == 2:
for k in files.keys():
A = ds[k].map(
lambda snake_case__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding='max_length' , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
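# get_tfds expects plain CSV files whose columns are one or two text features plus a
# label column selected by index; labels may be strings, since the label-to-id map is
# built from the unique values. A compatible single-sentence file might look like
# (header and rows are illustrative):
#
# label,sentence
# positive,this movie was great
# negative,what a waste of time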
_lowercase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: int = field(metadata={'''help''': '''Which column contains the label'''} )
_lowerCamelCase: str = field(default=_lowercase , metadata={'''help''': '''The path of the training file'''} )
_lowerCamelCase: Optional[str] = field(default=_lowercase , metadata={'''help''': '''The path of the development file'''} )
_lowerCamelCase: Optional[str] = field(default=_lowercase , metadata={'''help''': '''The path of the test file'''} )
_lowerCamelCase: int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowerCamelCase: Optional[str] = field(
default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A , A , A , A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__ ) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ : EvalPrediction ) -> Dict:
A = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(snake_case__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(snake_case__ )
return results
if __name__ == "__main__":
main() | 22 |
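# Example invocation (a sketch; the script filename and hyper-parameters are
# placeholders):
#
# python run_tf_text_classification.py \
#   --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#   --model_name_or_path bert-base-multilingual-uncased \
#   --output_dir out --do_train --do_eval --max_seq_length 128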
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 22 | 1 |
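# The (provider_name, options) tuple and SessionOptions built by the properties above
# are ultimately handed to onnxruntime; a sketch of the equivalent direct call
# (the model path is illustrative):
#
# import onnxruntime as ort
# opts = ort.SessionOptions()
# sess = ort.InferenceSession(
#     "unet/model.onnx", sess_options=opts,
#     providers=[("CUDAExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})],
# )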
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 |
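# End-to-end sketch of the processor above (the class and method names
# SegformerImageProcessor / post_process_semantic_segmentation are assumptions;
# the pattern matches the semantic-segmentation post-processing implemented here):
#
# processor = SegformerImageProcessor()
# inputs = processor(images=pil_image, return_tensors="pt")
# outputs = model(**inputs)
# seg_map = processor.post_process_semantic_segmentation(
#     outputs, target_sizes=[pil_image.size[::-1]]  # PIL size is (w, h); targets want (h, w)
# )[0]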
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 | 1 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def _snake_case ( snake_case__ : SplitDict ):
A = split_dict._to_yaml_list()
assert len(snake_case__ ) == len(snake_case__ )
A = SplitDict._from_yaml_list(snake_case__ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
A = None
# the split name of split_dict takes over the name of the split info object
A = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=snake_case__ ), SplitInfo(dataset_name='my_dataset' )] )
def _snake_case ( snake_case__ : Dict ):
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
A = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 22 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ):
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
snake_case__ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
# check if the u exists
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 | 1 |
"""simple docstring"""
def _snake_case ( snake_case__ : int = 10**9 ):
A = 1
A = 2
A = 0
A = 0
A = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""") | 22 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
A = F'{olid} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
A = inspect.getfile(accelerate.test_utils )
A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
A = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
print(F'Found {torch.cuda.device_count()} devices.' )
A = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ ,env=os.environ.copy() )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
print(F'Found {torch.cuda.device_count()} devices.' )
A = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
print(F'Command: {cmd}' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ ,env=os.environ.copy() )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
A = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(A_ ,env=os.environ.copy() )
@require_multi_gpu
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' )
A = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices='0,1' ):
execute_subprocess_async(A_ ,env=os.environ.copy() )
if __name__ == "__main__":
_lowercase = Accelerator()
_lowercase = (accelerator.state.process_index + 2, 10)
_lowercase = torch.randint(0, 10, shape).to(accelerator.device)
_lowercase = ''''''
_lowercase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowercase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowercase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 22 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : np.ndarray , snake_case__ : Union[int, Iterable[int]] , snake_case__ : bool , snake_case__ : int ):
def constraint_to_multiple_of(snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=None ):
A = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
A = math.floor(val / multiple ) * multiple
if x < min_val:
A = math.ceil(val / multiple ) * multiple
return x
A = (output_size, output_size) if isinstance(snake_case__ , snake_case__ ) else output_size
A , A = get_image_size(snake_case__ )
A , A = output_size
# determine new height and width
A = output_height / input_height
A = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
A = scale_width
else:
# fit height
A = scale_height
A = constraint_to_multiple_of(scale_height * input_height , multiple=snake_case__ )
A = constraint_to_multiple_of(scale_width * input_width , multiple=snake_case__ )
return (new_height, new_width)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = False ,A_ : int = 1 ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Any ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'height': 384, 'width': 384}
A = get_size_dict(A_ )
A = do_resize
A = size
A = keep_aspect_ratio
A = ensure_multiple_of
A = resample
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : bool = False ,A_ : int = 1 ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
A = get_resize_output_image_size(
A_ ,output_size=(size['height'], size['width']) ,keep_aspect_ratio=A_ ,multiple=A_ ,)
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : np.ndarray ,A_ : Union[int, float] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ,) -> Tuple:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Tuple ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : ImageInput ,A_ : bool = None ,A_ : int = None ,A_ : bool = None ,A_ : int = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : float = None ,A_ : bool = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : ChannelDimension = ChannelDimension.FIRST ,**A_ : Optional[Any] ,) -> PIL.Image.Image:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ )
A = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A = resample if resample is not None else self.resample
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : List[Tuple] = None ) -> Tuple:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[
dim : dim * 2
]
A = val[-dim:]
else:
A = val
return orig_state_dict
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
A = SwinvaForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 22 | 1 |
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list ):
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(a - b ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def _snake_case ( snake_case__ : list[float] ):
if point:
if isinstance(snake_case__ , snake_case__ ):
for item in point:
if not isinstance(snake_case__ , (int, float) ):
A = (
'Expected a list of numbers as input, found '
F'{type(snake_case__ ).__name__}'
)
raise TypeError(snake_case__ )
else:
A = F'Expected a list of numbers as input, found {type(snake_case__ ).__name__}'
raise TypeError(snake_case__ )
else:
raise ValueError('Missing an input' )
def _snake_case ( snake_case__ : list , snake_case__ : list ):
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError('Both points must be in the same n-dimensional space' )
return float(sum(abs(x - y ) for x, y in zip(snake_case__ , snake_case__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 |
"""simple docstring"""
from math import pi, sqrt
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(snake_case__ ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
return sqrt(snake_case__ )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _snake_case ( ):
assert gamma(0.5 ) == sqrt(snake_case__ )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 1 |
"""simple docstring"""
from math import factorial
_lowercase = {str(d): factorial(d) for d in range(10)}
def _snake_case ( snake_case__ : int ):
return sum(DIGIT_FACTORIAL[d] for d in str(snake_case__ ) )
def _snake_case ( ):
A = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , snake_case__ ) if sum_of_digit_factorial(snake_case__ ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""") | 22 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 1 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __get__( self : Optional[int] ,A_ : int ,A_ : Tuple=None ) -> Dict:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
A = '__cached_' + self.fget.__name__
A = getattr(A_ ,A_ ,A_ )
if cached is None:
A = self.fget(A_ )
setattr(A_ ,A_ ,A_ )
return cached
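# A minimal usage sketch for the caching descriptor above. Names are
# hypothetical: every class in this module shares the obfuscated name
# `lowerCAmelCase_`, so a real caller would first bind a stable alias.
#
#     cached_property = lowerCAmelCase_
#
#     class Model:
#         @cached_property
#         def weights(self):
#             return expensive_load()  # runs once; result stored on the instance
#
#     m = Model()
#     m.weights  # first access computes the value
#     m.weights  # later accesses read the `__cached_weights` attribute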
def _snake_case ( snake_case__ : Union[str, Any] ):
    A = snake_case__.lower()
    if A in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if A in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F'invalid truth value {snake_case__!r}' )
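# Usage sketch for the truth-value parser above (hypothetical alias, since
# later top-level defs shadow the name `_snake_case`). Matching is
# case-insensitive thanks to the `.lower()` call:
#
#     parse_flag = _snake_case
#     parse_flag('YES')    # -> 1
#     parse_flag('off')    # -> 0
#     parse_flag('maybe')  # raises ValueError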
def _snake_case ( snake_case__ : str ):
if is_torch_fx_proxy(snake_case__ ):
return True
if is_torch_available():
import torch
if isinstance(snake_case__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(snake_case__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(snake_case__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(snake_case__ , np.ndarray )
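# Sketch of the dispatch above: torch, TF, and JAX tensors are each recognized
# only when their framework is importable, and NumPy arrays are the final
# fallback. Hypothetical alias; frameworks assumed installed where referenced:
#
#     is_tensor = _snake_case
#     is_tensor(np.zeros(3))     # True (numpy fallback)
#     is_tensor(torch.zeros(3))  # True when torch is available
#     is_tensor([1, 2, 3])       # False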
def _snake_case ( snake_case__ : Optional[Any] ):
return isinstance(snake_case__ , np.ndarray )
def _snake_case ( snake_case__ : List[Any] ):
return _is_numpy(snake_case__ )
def _snake_case ( snake_case__ : List[Any] ):
import torch
return isinstance(snake_case__ , torch.Tensor )
def _snake_case ( snake_case__ : List[str] ):
return False if not is_torch_available() else _is_torch(snake_case__ )
def _snake_case ( snake_case__ : List[Any] ):
import torch
return isinstance(snake_case__ , torch.device )
def _snake_case ( snake_case__ : Optional[int] ):
return False if not is_torch_available() else _is_torch_device(snake_case__ )
def _snake_case ( snake_case__ : Optional[int] ):
import torch
if isinstance(snake_case__ , snake_case__ ):
if hasattr(snake_case__ , snake_case__ ):
A = getattr(snake_case__ , snake_case__ )
else:
return False
return isinstance(snake_case__ , torch.dtype )
def _snake_case ( snake_case__ : List[Any] ):
return False if not is_torch_available() else _is_torch_dtype(snake_case__ )
def _snake_case ( snake_case__ : Dict ):
import tensorflow as tf
return isinstance(snake_case__ , tf.Tensor )
def _snake_case ( snake_case__ : str ):
return False if not is_tf_available() else _is_tensorflow(snake_case__ )
def _snake_case ( snake_case__ : List[Any] ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(snake_case__ , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(snake_case__ )
return type(snake_case__ ) == tf.Tensor
def _snake_case ( snake_case__ : str ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(snake_case__ )
def _snake_case ( snake_case__ : List[str] ):
import jax.numpy as jnp # noqa: F811
return isinstance(snake_case__ , jnp.ndarray )
def _snake_case ( snake_case__ : int ):
return False if not is_flax_available() else _is_jax(snake_case__ )
def _snake_case ( snake_case__ : List[Any] ):
if isinstance(snake_case__ , (dict, UserDict) ):
return {k: to_py_obj(snake_case__ ) for k, v in obj.items()}
elif isinstance(snake_case__ , (list, tuple) ):
return [to_py_obj(snake_case__ ) for o in obj]
elif is_tf_tensor(snake_case__ ):
return obj.numpy().tolist()
elif is_torch_tensor(snake_case__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(snake_case__ ):
return np.asarray(snake_case__ ).tolist()
elif isinstance(snake_case__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
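# Sketch of the recursive conversion above (hypothetical alias): dicts and
# lists/tuples are walked, and any framework tensor or NumPy value inside
# becomes plain Python data.
#
#     to_py_obj = _snake_case
#     to_py_obj({'a': np.array([1, 2]), 'b': [np.float32(0.5)]})
#     # -> {'a': [1, 2], 'b': [0.5]}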
def _snake_case ( snake_case__ : int ):
if isinstance(snake_case__ , (dict, UserDict) ):
return {k: to_numpy(snake_case__ ) for k, v in obj.items()}
elif isinstance(snake_case__ , (list, tuple) ):
return np.array(snake_case__ )
elif is_tf_tensor(snake_case__ ):
return obj.numpy()
elif is_torch_tensor(snake_case__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(snake_case__ ):
return np.asarray(snake_case__ )
else:
return obj
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
A = fields(self )
# Safety and consistency checks
if not len(A_ ):
raise ValueError(F'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' )
A = getattr(self ,class_fields[0].name )
A = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(A_ ):
if isinstance(A_ ,A_ ):
A = first_field.items()
A = True
else:
try:
A = iter(A_ )
A = True
except TypeError:
A = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(A_ ):
if (
not isinstance(A_ ,(list, tuple) )
or not len(A_ ) == 2
or not isinstance(element[0] ,A_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
A = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self ,element[0] ,element[1] )
if element[1] is not None:
A = element[1]
elif first_field is not None:
A = first_field
else:
for field in class_fields:
A = getattr(self ,field.name )
if v is not None:
A = v
def __delitem__( self : Optional[int] ,*A_ : Optional[int] ,**A_ : Dict ) -> Tuple:
raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def _SCREAMING_SNAKE_CASE ( self : int ,*A_ : Optional[int] ,**A_ : List[Any] ) -> Dict:
raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Dict:
raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,*A_ : Union[str, Any] ,**A_ : Dict ) -> Dict:
raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__( self : int ,A_ : Any ) -> str:
if isinstance(A_ ,A_ ):
A = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[Any] ,A_ : int ,A_ : Dict ) -> Dict:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(A_ ,A_ )
super().__setattr__(A_ ,A_ )
def __setitem__( self : int ,A_ : Dict ,A_ : Tuple ) -> int:
# Will raise a KeyError if needed
super().__setitem__(A_ ,A_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
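# Hedged illustration of the output class above: the same value is reachable
# by attribute, by string key and by integer index. `_DemoOutput` is a
# hypothetical subclass for this sketch only; it assumes the class above is an
# OrderedDict-backed ModelOutput, which its dict-style methods suggest.
from dataclasses import dataclass
from typing import Optional
@dataclass
class _DemoOutput(lowerCAmelCase_ ):
    loss: Optional[float] = None
    logits: Optional[list] = None
def _demo_model_output():
    out = _DemoOutput(loss=0.5 ,logits=[1, 2] )
    assert out.loss == out['loss'] == out[0] == 0.5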
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
@classmethod
    def _missing_( cls ,value ):
        raise ValueError(
            F'{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}' )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
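# Hedged note on the two option enums above: with an ExplicitEnum-style base,
# members can be built from their raw values (the tensor-type enum maps 'pt'
# to PYTORCH), and an invalid value such as 'paddle' raises a ValueError that
# lists the valid choices via the `_missing_` hook defined further up, instead
# of the terse default Enum error message.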
class lowerCAmelCase_ :
'''simple docstring'''
    def __init__( self ,context_managers: List[ContextManager] ) -> None:
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self ,*args ,**kwargs ):
        self.stack.__exit__(*args ,**kwargs )
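# Hedged usage sketch for the ExitStack wrapper above: every manager passed in
# is entered when the `with` block starts and unwound, in reverse order, when
# it ends. The helper and its file handles are illustrative only; the default
# argument binds the class now because its name is reused further below.
def _demo_context_managers(paths ,_managers_cls=lowerCAmelCase_ ):
    handles = [open(p ) for p in paths]
    with _managers_cls(handles ):
        pass  # every file is open inside the block
    # all files have been closed here by the ExitStack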
def _snake_case ( model_class ):
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def _snake_case ( model_class ):
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
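# Hedged sketch of what the two signature-inspection helpers above look for,
# exercised on a hypothetical stand-in with a PyTorch-style `forward`:
class _FakeModelForDemo:
    def forward( self ,input_ids ,labels=None ,return_loss=True ):
        ...
def _demo_signature_inspection():
    params = inspect.signature(_FakeModelForDemo.forward ).parameters
    assert [p for p in params if 'label' in p] == ['labels']  # find_labels-style scan
    assert params['return_loss'].default is True  # can_return_loss-style check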
def _snake_case ( d : MutableMapping , parent_key : str = "" , delimiter : str = "." ):
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from _snake_case(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
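# Hedged usage sketch for the dict flattener above: nested keys are joined
# with the delimiter into a single-level mapping. The default argument binds
# the flattener now because the `_snake_case` name is reused further below.
def _demo_flatten_dict(_flatten=_snake_case ):
    flat = _flatten({'a': 1, 'b': {'c': 2, 'd': {'e': 3}}} )
    assert flat == {'a': 1, 'b.c': 2, 'b.d.e': 3}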
@contextmanager
def _snake_case ( working_dir , use_temp_dir : bool = False ):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
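# Hedged usage sketch for the context manager above: with `use_temp_dir=True`
# the caller receives a throwaway directory that is deleted on exit, otherwise
# the given working directory is yielded unchanged. The default argument binds
# the manager now because the `_snake_case` name is reused further below.
def _demo_working_or_temp_dir(_tmp_ctx=_snake_case ):
    import os  # local import; the surrounding module may not expose it
    with _tmp_ctx('.' , use_temp_dir=True ) as workdir:
        assert os.path.isdir(workdir )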
def _snake_case ( array , axes=None ):
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F'Type not supported for transpose: {type(array )}.' )
def _snake_case ( array , newshape ):
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F'Type not supported for reshape: {type(array )}.' )
def _snake_case ( array , axis=None ):
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F'Type not supported for squeeze: {type(array )}.' )
def _snake_case ( array , axis ):
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F'Type not supported for expand_dims: {type(array )}.' )
def _snake_case ( array ):
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F'Type not supported for tensor_size: {type(array )}.' )
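# Hedged usage sketch for the framework-agnostic helpers above, checked with
# plain numpy so no deep-learning framework is required; each helper
# dispatches on the input type, and these are the numpy results it returns.
def _demo_tensor_ops():
    x = np.ones((1, 2, 3) )
    assert np.transpose(x ).shape == (3, 2, 1)  # transpose helper, axes=None
    assert np.reshape(x , (3, 2) ).shape == (3, 2)  # reshape helper
    assert np.squeeze(x ).shape == (2, 3)  # squeeze helper drops the 1-sized axis
    assert np.expand_dims(x , 0 ).shape == (1, 1, 2, 3)  # expand_dims helper
    assert np.size(x ) == 6  # element-count helper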
def _snake_case ( repo_id , auto_map ):
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F'{repo_id}--{value}'
    return auto_map
def _snake_case ( model_class ):
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch' ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F'Could not infer framework from class {model_class}.' ) | 22 |
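# Hedged note on the framework-sniffing helper above: it walks the MRO and
# keys off each base class's module name, so a class with no torch / tf /
# flax ancestry raises TypeError rather than guessing, e.g.:
#
#     class NotAModel: ...
#     _snake_case(NotAModel)  # TypeError: Could not infer framework ...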
"""simple docstring"""
def _snake_case ( x_points : list , y_points : list , xa : int ):
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
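# Hedged usage sketch for the interpolation routine above: the points
# (1, 6), (2, 7), (3, 8), (4, 9) lie on the line y = x + 5, so evaluating at
# 99 returns 104 together with the table of intermediate values.
def _demo_interpolate(_interpolate=_snake_case ):  # bind now, before any rebinding
    value, table = _interpolate([1, 2, 3, 4] , [6, 7, 8, 9] , 99 )
    assert value == 104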
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
    feature_extractor_class = '''WhisperFeatureExtractor'''
    tokenizer_class = '''WhisperTokenizer'''
    def __init__( self ,feature_extractor ,tokenizer ) -> None:
        super().__init__(feature_extractor ,tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self ,task=None ,language=None ,no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task ,language=language ,no_timestamps=no_timestamps )
    def __call__( self ,*args ,**kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args ,**kwargs )
        audio = kwargs.pop('audio' ,None )
        sampling_rate = kwargs.pop('sampling_rate' ,None )
        text = kwargs.pop('text' ,None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio ,*args ,sampling_rate=sampling_rate ,**kwargs )
        if text is not None:
            encodings = self.tokenizer(text ,**kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )
    def get_prompt_ids( self ,text ,return_tensors="np" ):
        return self.tokenizer.get_prompt_ids(text ,return_tensors=return_tensors ) | 22 |
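# Hedged usage sketch for the processor above (checkpoint name assumed; the
# audio input is a 1-D float array sampled at 16 kHz):
#
#     processor = WhisperProcessor.from_pretrained('openai/whisper-tiny')
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors='pt')
#     labels = processor(text='a transcript').input_ids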
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : int=2 ,A_ : int=3 ,A_ : int=4 ,A_ : Optional[int]=2 ,A_ : Dict=7 ,A_ : Any=True ,A_ : Dict=True ,A_ : Tuple=True ,A_ : List[Any]=True ,A_ : Dict=99 ,A_ : List[str]=36 ,A_ : str=2 ,A_ : Union[str, Any]=4 ,A_ : Any=37 ,A_ : int="gelu" ,A_ : Tuple=0.1 ,A_ : Tuple=0.1 ,A_ : Union[str, Any]=512 ,A_ : List[Any]=16 ,A_ : int=2 ,A_ : Union[str, Any]=0.02 ,A_ : Dict=6 ,A_ : str=6 ,A_ : List[str]=3 ,A_ : Optional[Any]=4 ,A_ : Optional[Any]=None ,A_ : Any=1000 ,) -> List[Any]:
A = parent
A = batch_size
A = num_channels
A = image_size
A = patch_size
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = coordinate_size
A = shape_size
A = num_labels
A = num_choices
A = scope
A = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
A = text_seq_length
A = (image_size // patch_size) ** 2 + 1
A = self.text_seq_length + self.image_seq_length
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
A = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
A = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
A = bbox[i, j, 3]
A = bbox[i, j, 1]
A = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
A = bbox[i, j, 2]
A = bbox[i, j, 0]
A = tmp_coordinate
A = tf.constant(A_ )
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.text_seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
A = LayoutLMvaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : str ,A_ : Optional[int] ) -> Dict:
A = TFLayoutLMvaModel(config=A_ )
# text + image
A = model(A_ ,pixel_values=A_ ,training=A_ )
A = model(
A_ ,bbox=A_ ,pixel_values=A_ ,attention_mask=A_ ,token_type_ids=A_ ,training=A_ ,)
A = model(A_ ,bbox=A_ ,pixel_values=A_ ,training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
A = model(A_ ,training=A_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
A = model({'pixel_values': pixel_values} ,training=A_ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : int ,A_ : str ,A_ : List[str] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ) -> Dict:
A = self.num_labels
A = TFLayoutLMvaForSequenceClassification(config=A_ )
A = model(
A_ ,bbox=A_ ,pixel_values=A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,training=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Any ,A_ : List[Any] ,A_ : List[str] ,A_ : Tuple ) -> Tuple:
A = self.num_labels
A = TFLayoutLMvaForTokenClassification(config=A_ )
A = model(
A_ ,bbox=A_ ,pixel_values=A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,training=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : int ,A_ : Any ,A_ : int ,A_ : Union[str, Any] ,A_ : Any ) -> str:
A = 2
A = TFLayoutLMvaForQuestionAnswering(config=A_ )
A = model(
A_ ,bbox=A_ ,pixel_values=A_ ,attention_mask=A_ ,token_type_ids=A_ ,start_positions=A_ ,end_positions=A_ ,training=A_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.prepare_config_and_inputs()
((A) , (A) , (A) , (A) , (A) , (A) , (A) , (A)) = config_and_inputs
A = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: Optional[int] = (
{'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowerCamelCase: List[str] = False
_lowerCamelCase: str = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Any ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : str ) -> Dict:
return True
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str ,A_ : List[str] ,A_ : Optional[int]=False ) -> dict:
A = copy.deepcopy(A_ )
if model_class in get_values(A_ ):
A = {
k: tf.tile(tf.expand_dims(A_ ,1 ) ,(1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(A_ ,tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
        if return_labels:
            if model_class in get_values(A_ ):
                A = tf.ones(self.model_tester.batch_size ,dtype=tf.int32 )
            elif model_class in get_values(A_ ):
                A = tf.zeros(self.model_tester.batch_size ,dtype=tf.int32 )
                A = tf.zeros(self.model_tester.batch_size ,dtype=tf.int32 )
            elif model_class in get_values(A_ ):
                A = tf.zeros(self.model_tester.batch_size ,dtype=tf.int32 )
            elif model_class in get_values(A_ ):
                A = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=tf.int32 )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = TFLayoutLMvaModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
if getattr(A_ ,'hf_compute_loss' ,A_ ):
# The number of elements in the loss should be the same as the number of elements in the label
A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ )
A = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() ,reverse=A_ )[0]
]
A = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ )
A = prepared_for_class.pop('input_ids' )
A = model(A_ ,**A_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ )
A = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
A = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
A = -100
A = tf.convert_to_tensor(A_ )
A = model(A_ ,**A_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ )
A = model(A_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
A = self._prepare_for_class(inputs_dict.copy() ,A_ ,return_labels=A_ )
# Get keys that were added with the _prepare_for_class function
A = prepared_for_class.keys() - inputs_dict.keys()
A = inspect.signature(model.call ).parameters
A = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
A = {0: 'input_ids'}
for label_key in label_keys:
A = signature_names.index(A_ )
A = label_key
A = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
A = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
A = prepared_for_class[value]
A = tuple(A_ )
# Send to model
A = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A_ ,A_ ,A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A = type
self.model_tester.create_and_check_model(A_ ,A_ ,A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFLayoutLMvaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=A_ ) if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=A_ ,return_tensors='tf' ).pixel_values
A = tf.constant([[1, 2]] )
A = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) ,axis=0 )
# forward pass
A = model(input_ids=A_ ,bbox=A_ ,pixel_values=A_ ,training=A_ )
# verify the logits
A = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape ,A_ )
A = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] ,A_ ,atol=1e-4 ) ) | 22 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
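# Hedged sketch of the dispatch pattern above: each command registers a
# subparser and sets itself as the `func` default, so `args.func(args)` builds
# the selected command. A minimal stand-alone version of the same idea:
def _demo_subcommand_dispatch(argv ):
    demo_parser = ArgumentParser('demo' )
    demo_subparsers = demo_parser.add_subparsers()
    ping = demo_subparsers.add_parser('ping' )
    ping.set_defaults(func=lambda args: 'pong' )
    args = demo_parser.parse_args(argv )
    return args.func(args )  # _demo_subcommand_dispatch(['ping']) == 'pong'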
if __name__ == "__main__":
main() | 22 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''') | 22 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='accelerate command helpers' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main() | 22 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''
    def __init__( self ) -> None:
        self.node_position = []
    def get_position( self ,vertex ):
        return self.node_position[vertex]
    def set_position( self ,vertex ,pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self ,heap ,start ,size ,positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa
            temp = self.get_position(positions[smallest_child] )
            self.set_position(
                positions[smallest_child] ,self.get_position(positions[start] ) )
            self.set_position(positions[start] ,temp )
            self.top_to_bottom(heap ,smallest_child ,size ,positions )
    def bottom_to_top( self ,val ,index ,heap ,position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] ,index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp ,index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp ,0 )
    def heapify( self ,heap ,positions ):
        start = len(heap ) // 2 - 1
        for i in range(start ,-1 ,-1 ):
            self.top_to_bottom(heap ,i ,len(heap ) ,positions )
    def delete_minimum( self ,heap ,positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap ,0 ,len(heap ) ,positions )
        return temp
def prisms_algorithm( adjacency_list ):
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
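# Hedged usage sketch for the algorithm above: a triangle with edge weights
# 1, 2 and 3 keeps its two cheapest edges in the minimum spanning tree.
def _demo_prisms_algorithm():
    adjacency = defaultdict(list )
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        adjacency[u].append([v, w] )
        adjacency[v].append([u, w] )
    assert sorted(prisms_algorithm(adjacency ) ) == [(0, 1), (1, 2)]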
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 22 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase = True
except ImportError:
_lowercase = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( snake_case__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ ) | 22 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['state_dict']

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('bn', 'batch_norm')
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='pt').pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
A = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
A = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
A = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
A = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
A = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case__ , atol=1e-4 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving processor to {pytorch_dump_folder_path}')
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 22 |
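# --- Editor's note: hedged usage sketch, not part of the original script. ---
# Once a checkpoint has been converted and pushed by convert_upernet_checkpoint
# above, it could be used for inference roughly like this. The repo id
# 'openmmlab/upernet-convnext-tiny' and the example image URL are assumptions
# for illustration.
from PIL import Image
import requests
import torch
from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

processor = SegformerImageProcessor()
model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny')
model.eval()

url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
pixel_values = processor(image, return_tensors='pt').pixel_values

with torch.no_grad():
    outputs = model(pixel_values)

# logits have shape (batch_size, num_labels, height, width); argmax over the
# label axis yields a per-pixel ADE20K segmentation map
segmentation = outputs.logits.argmax(dim=1)[0]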
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'clusters'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'image_processor.json')
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
@unittest.skip('ImageGPT requires clusters at initialization' )
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils', split='test')

    image1 = Image.open(dataset[4]['file'])
    image2 = Image.open(dataset[5]['file'])

    images = [image1, image2]

    return images
@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors='pt')

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors='pt')

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids) | 22 | 1 |
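# --- Editor's note: hedged sketch of what the clusters in the tests above do. ---
# ImageGPT quantizes each (normalized) RGB pixel to the id of its nearest color
# cluster, which is why the processor returns `input_ids` rather than pixel
# values. This is a minimal reimplementation of that nearest-cluster lookup for
# illustration, not the library's internal code.
import numpy as np

def color_quantize(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) array scaled to [-1, 1]; clusters: (k, 3) color palette
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
    return distances.argmin(axis=1)  # (n,) token ids in [0, k)

clusters = np.asarray([[0.89, 0.66, 0.39], [-0.60, -0.02, 0.54]])  # like the 2 test clusters
pixels = np.asarray([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])
print(color_quantize(pixels, clusters))  # -> [0 1]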
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 22 |
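# --- Editor's note: hedged sketch of the lazy-import pattern used above. ---
# `_LazyModule` defers the heavy submodule imports until an attribute is first
# accessed. A toy equivalent using module-level __getattr__ (PEP 562) looks
# like this; the names here are illustrative, not the transformers
# implementation.
import importlib

_import_structure = {'tokenization_perceiver': ['PerceiverTokenizer']}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module('.' + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')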
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) | 22 | 1 |
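# --- Editor's note: hedged usage sketch for the command defined above. ---
# Once registered with the transformers CLI, the command would be invoked from
# a shell roughly like this (model name and flags are illustrative):
#
#   transformers-cli download --cache-dir ~/.cache/my_models bert-base-uncased
#
# which is equivalent to driving the command object directly:
from argparse import Namespace

args = Namespace(model='bert-base-uncased', cache_dir=None, force=False, trust_remote_code=False)
command = download_command_factory(args)
command.run()  # downloads the model and tokenizer into the cache directory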
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta_prelayernorm'] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta_prelayernorm'] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta_prelayernorm'] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 22 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spm_char.model'''}
_lowercase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
_lowercase = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token='<s>',
        eos_token='</s>',
        unk_token='<unk>',
        pad_token='<pad>',
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,) | 22 | 1 |
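# --- Editor's note: hedged usage sketch for the tokenizer above. ---
# Loading the pretrained character-level tokenizer and roughly round-tripping a
# string; the checkpoint name comes from the pretrained map above, the sample
# text is illustrative.
from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_tts')
encoded = tokenizer('Hello world', return_tensors='pt')
print(encoded.input_ids)  # character-level ids, ending with the </s> token
print(tokenizer.decode(encoded.input_ids[0], skip_special_tokens=True))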
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
# compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) | 22 |
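# --- Editor's note: hedged sketch extending the integration test above. ---
# The same Flax model can embed a batch of sentences; padding via the tokenizer
# keeps the input arrays rectangular. The sentences are illustrative.
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxXLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')

batch = tokenizer(['Hello world', 'Bonjour le monde'], padding=True, return_tensors='np')
outputs = model(**batch)
# mean-pool the last hidden state over the sequence axis: one vector per sentence
sentence_embeddings = jnp.mean(outputs.last_hidden_state, axis=1)  # shape (2, 768)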
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_clip_fast'] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_clip'] = ['CLIPFeatureExtractor']
    _import_structure['image_processing_clip'] = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clip'] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_clip'] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_clip'] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 22 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)
    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id, sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')
    def remove_unknown_sequences(self):
        if 'unk_token' not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')
    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t | 700 |
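# --- Editor's note: hedged usage sketch for the dataset above. ---
# This assumes a distillation `params` namespace exposing `max_model_input_size`,
# `mlm`, `special_tok_ids` and `is_master` as used above, and a pre-tokenized
# `token_id_sequences` list; both names are illustrative.
from torch.utils.data import DataLoader

dataset = LmSeqsDataset(params=params, data=token_id_sequences)
loader = DataLoader(
    dataset,
    batch_size=32,
    shuffle=True,
    collate_fn=dataset.batch_sequences,  # pads each batch to its longest sequence
)
for token_ids, lengths in loader:
    pass  # token_ids: (bs, max_seq_len_), lengths: (bs)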
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type='last',
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids) | 22 | 0 |
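# --- Editor's note: hedged sketch mirroring the integration test above. ---
# XLM is not a strong autoregressive generator (see the TODO above), but the
# mechanics look like this; the prompt and generation settings are illustrative.
import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')

input_ids = torch.tensor([tokenizer.encode('the president')])
output_ids = model.generate(input_ids, max_length=20, do_sample=False)
print(tokenizer.decode(output_ids[0]))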