code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'rwkv'
lowerCAmelCase_ = {'max_position_embeddings': 'context_length'}
def __init__( self : Optional[Any],__A : List[Any]=5_0_2_7_7,__A : Any=1_0_2_4,__A : Optional[int]=4_0_9_6,__A : int=3_2,__A : Optional[int]=None,__A : Optional[int]=None,__A : Tuple=1e-5,__A : Union[str, Any]=0,__A : Tuple=0,__A : Union[str, Any]=6,__A : int=False,__A : str=True,**__A : str,):
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[Any] = context_length
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size
_lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size
_lowerCamelCase : int = layer_norm_epsilon
_lowerCamelCase : int = rescale_every
_lowerCamelCase : str = use_cache
_lowerCamelCase : Dict = bos_token_id
_lowerCamelCase : int = eos_token_id
super().__init__(
tie_word_embeddings=__A,bos_token_id=__A,eos_token_id=__A,**__A ) | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : Optional[Any] = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = GPTaTokenizer
def __init__( self : Optional[int],__A : str=None,__A : Tuple=None,__A : Union[str, Any]=None,__A : str="<|endoftext|>",__A : List[Any]="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Dict=False,**__A : Union[str, Any],):
super().__init__(
__A,__A,tokenizer_file=__A,unk_token=__A,bos_token=__A,eos_token=__A,add_prefix_space=__A,**__A,)
_lowerCamelCase : Any = kwargs.pop("add_bos_token",__A )
_lowerCamelCase : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : int = add_prefix_space
_lowerCamelCase : Union[str, Any] = pre_tok_class(**__A )
_lowerCamelCase : Tuple = add_prefix_space
def lowerCamelCase_ ( self : Tuple,*__A : Tuple,**__A : List[Any] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : List[Any],*__A : Optional[int],**__A : Optional[Any] ):
_lowerCamelCase : Optional[Any] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
_lowerCamelCase : int = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[Any],__A : "Conversation" ):
_lowerCamelCase : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A,add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
return input_ids
| 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ :
def __init__( self : str,__A : Tuple,__A : int=2,__A : List[str]=3,__A : Dict=4,__A : str=2,__A : Tuple=7,__A : List[str]=True,__A : int=True,__A : List[Any]=True,__A : Tuple=True,__A : Optional[Any]=9_9,__A : Optional[int]=3_6,__A : str=3,__A : Tuple=4,__A : Tuple=3_7,__A : Any="gelu",__A : Any=0.1,__A : Any=0.1,__A : Optional[int]=5_1_2,__A : Optional[int]=1_6,__A : int=2,__A : str=0.02,__A : Optional[int]=6,__A : Dict=6,__A : Optional[Any]=3,__A : Any=4,__A : int=None,__A : List[Any]=1_0_0_0,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : str = image_size
_lowerCamelCase : Dict = patch_size
_lowerCamelCase : Any = text_seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : Any = use_input_mask
_lowerCamelCase : Dict = use_token_type_ids
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Tuple = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : Optional[Any] = type_sequence_label_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = coordinate_size
_lowerCamelCase : int = shape_size
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : Any = num_choices
_lowerCamelCase : List[Any] = scope
_lowerCamelCase : List[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCamelCase : Dict = text_seq_length
_lowerCamelCase : int = (image_size // patch_size) ** 2 + 1
_lowerCamelCase : Optional[Any] = self.text_seq_length + self.image_seq_length
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4],self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCamelCase : Optional[Any] = bbox[i, j, 3]
_lowerCamelCase : Tuple = bbox[i, j, 1]
_lowerCamelCase : List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCamelCase : Union[str, Any] = bbox[i, j, 2]
_lowerCamelCase : Optional[Any] = bbox[i, j, 0]
_lowerCamelCase : Optional[int] = t
_lowerCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : str = ids_tensor([self.batch_size, self.text_seq_length],self.type_vocab_size )
_lowerCamelCase : int = None
_lowerCamelCase : Dict = None
if self.use_labels:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : int = ids_tensor([self.batch_size, self.text_seq_length],self.num_labels )
_lowerCamelCase : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,coordinate_size=self.coordinate_size,shape_size=self.shape_size,input_size=self.image_size,patch_size=self.patch_size,)
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCamelCase_ ( self : Dict,__A : int,__A : str,__A : str,__A : List[Any],__A : List[Any],__A : Any,__A : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Union[str, Any] = LayoutLMvaModel(config=__A )
model.to(__A )
model.eval()
# text + image
_lowerCamelCase : Tuple = model(__A,pixel_values=__A )
_lowerCamelCase : int = model(
__A,bbox=__A,pixel_values=__A,attention_mask=__A,token_type_ids=__A )
_lowerCamelCase : Optional[Any] = model(__A,bbox=__A,pixel_values=__A,token_type_ids=__A )
_lowerCamelCase : Any = model(__A,bbox=__A,pixel_values=__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCamelCase : str = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCamelCase : Tuple = model(pixel_values=__A )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Tuple,__A : Optional[int],__A : Any,__A : Optional[Any],__A : Dict,__A : List[Any],__A : str,__A : List[str],__A : Union[str, Any] ):
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Union[str, Any] = LayoutLMvaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[Any] = model(
__A,bbox=__A,pixel_values=__A,attention_mask=__A,token_type_ids=__A,labels=__A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int,__A : int,__A : Optional[Any],__A : Optional[Any],__A : Optional[int],__A : Dict,__A : str,__A : Optional[int],__A : Optional[Any] ):
_lowerCamelCase : List[str] = self.num_labels
_lowerCamelCase : str = LayoutLMvaForTokenClassification(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(
__A,bbox=__A,pixel_values=__A,attention_mask=__A,token_type_ids=__A,labels=__A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCamelCase_ ( self : str,__A : int,__A : Tuple,__A : Tuple,__A : Dict,__A : Optional[Any],__A : Optional[int],__A : Union[str, Any],__A : List[Any] ):
_lowerCamelCase : Optional[Any] = LayoutLMvaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(
__A,bbox=__A,pixel_values=__A,attention_mask=__A,token_type_ids=__A,start_positions=__A,end_positions=__A,)
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : Union[str, Any] = config_and_inputs
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def lowerCamelCase_ ( self : Tuple,__A : List[str],__A : Tuple,__A : Optional[int],__A : Optional[Any],__A : Tuple ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = LayoutLMvaModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : str,__A : Dict=False ):
_lowerCamelCase : List[Any] = copy.deepcopy(__A )
if model_class in get_values(__A ):
_lowerCamelCase : Dict = {
k: v.unsqueeze(1 ).expand(-1,self.model_tester.num_choices,-1 ).contiguous()
if isinstance(__A,torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
_lowerCamelCase : Optional[Any] = torch.ones(self.model_tester.batch_size,dtype=torch.long,device=__A )
elif model_class in get_values(__A ):
_lowerCamelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size,dtype=torch.long,device=__A )
_lowerCamelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size,dtype=torch.long,device=__A )
elif model_class in [
*get_values(__A ),
]:
_lowerCamelCase : Any = torch.zeros(
self.model_tester.batch_size,dtype=torch.long,device=__A )
elif model_class in [
*get_values(__A ),
]:
_lowerCamelCase : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length),dtype=torch.long,device=__A,)
return inputs_dict
def lowerCamelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = LayoutLMvaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=__A ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(__A )
_lowerCamelCase : Union[str, Any] = self.default_image_processor
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Optional[int] = image_processor(images=__A,return_tensors="pt" ).pixel_values.to(__A )
_lowerCamelCase : str = torch.tensor([[1, 2]] )
_lowerCamelCase : Union[str, Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCamelCase : Optional[int] = model(
input_ids=input_ids.to(__A ),bbox=bbox.to(__A ),pixel_values=pixel_values.to(__A ),)
# verify the logits
_lowerCamelCase : List[str] = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape,__A )
_lowerCamelCase : int = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3],__A,atol=1e-4 ) ) | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase_ : Dict = sys.version_info >= (3, 10)
def A_ ( _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Tuple=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_lowerCAmelCase )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = None
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'titi'
lowerCAmelCase_ = 'toto'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'titi'
lowerCAmelCase_ = 'toto'
lowerCAmelCase_ = 42
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 'toto'
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[Any] = BasicEnum(self.foo )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 'toto'
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Tuple = MixedTypeEnum(self.foo )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default=A , metadata={'help': 'help message'} )
lowerCAmelCase_ = None
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[] )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[1, 2, 3] )
lowerCAmelCase_ = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
lowerCAmelCase_ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field()
lowerCAmelCase_ = field()
lowerCAmelCase_ = field()
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Union[str, Any] = BasicEnum(self.required_enum )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = field()
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default='toto' , metadata={'help': 'help message'} )
lowerCAmelCase_ = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = None
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = None
lowerCAmelCase_ = field(default=A , metadata={'help': 'help message'} )
lowerCAmelCase_ = None
lowerCAmelCase_ = list_field(default=[] )
lowerCAmelCase_ = list_field(default=[] )
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : int,__A : argparse.ArgumentParser,__A : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ),len(b._actions ) )
for x, y in zip(a._actions,b._actions ):
_lowerCamelCase : List[str] = {k: v for k, v in vars(__A ).items() if k != "container"}
_lowerCamelCase : List[Any] = {k: v for k, v in vars(__A ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices",__A ) and yy.get("choices",__A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__A ),yy["type"](__A ) )
del xx["type"], yy["type"]
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,required=__A )
expected.add_argument("--bar",type=__A,required=__A )
expected.add_argument("--baz",type=__A,required=__A )
expected.add_argument("--flag",type=__A,default=__A,const=__A,nargs="?" )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Any = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
(_lowerCamelCase ) : Tuple = parser.parse_args_into_dataclasses(__A,look_for_args_file=__A )
self.assertFalse(example.flag )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
expected.add_argument("--foo",default=4_2,type=__A )
expected.add_argument("--baz",default="toto",type=__A,help="help message" )
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,default=__A,const=__A,nargs="?" )
expected.add_argument("--baz",type=__A,default=__A,const=__A,nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz",action="store_false",default=__A,dest="baz" )
expected.add_argument("--opt",type=__A,default=__A )
_lowerCamelCase : Tuple = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
_lowerCamelCase : str = HfArgumentParser(__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Dict = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Optional[Any] = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : Optional[int] = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
_lowerCamelCase : List[str] = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__A,Namespace(foo=__A,baz=__A,opt=__A ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = HfArgumentParser(__A )
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"--foo",default="toto",choices=["titi", "toto", 4_2],type=make_choice_type_function(["titi", "toto", 4_2] ),)
self.argparsersEqual(__A,__A )
_lowerCamelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(args.foo,"toto" )
_lowerCamelCase : str = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.toto )
_lowerCamelCase : int = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo,"titi" )
_lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.titi )
_lowerCamelCase : Optional[int] = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo,4_2 )
_lowerCamelCase : Any = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo,MixedTypeEnum.fourtytwo )
def lowerCamelCase_ ( self : Union[str, Any] ):
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 'toto'
_lowerCamelCase : int = HfArgumentParser(__A )
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument(
"--foo",default="toto",choices=("titi", "toto", 4_2),type=make_choice_type_function(["titi", "toto", 4_2] ),)
self.argparsersEqual(__A,__A )
_lowerCamelCase : Any = parser.parse_args([] )
self.assertEqual(args.foo,"toto" )
_lowerCamelCase : Optional[Any] = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo,"titi" )
_lowerCamelCase : int = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo,4_2 )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : Any = argparse.ArgumentParser()
expected.add_argument("--foo_int",nargs="+",default=[],type=__A )
expected.add_argument("--bar_int",nargs="+",default=[1, 2, 3],type=__A )
expected.add_argument("--foo_str",nargs="+",default=["Hallo", "Bonjour", "Hello"],type=__A )
expected.add_argument("--foo_float",nargs="+",default=[0.1, 0.2, 0.3],type=__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = parser.parse_args([] )
self.assertEqual(
__A,Namespace(foo_int=[],bar_int=[1, 2, 3],foo_str=["Hallo", "Bonjour", "Hello"],foo_float=[0.1, 0.2, 0.3] ),)
_lowerCamelCase : Any = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__A,Namespace(foo_int=[1],bar_int=[2, 3],foo_str=["a", "b", "c"],foo_float=[0.1, 0.7] ) )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("--foo",default=__A,type=__A )
expected.add_argument("--bar",default=__A,type=__A,help="help message" )
expected.add_argument("--baz",default=__A,type=__A )
expected.add_argument("--ces",nargs="+",default=[],type=__A )
expected.add_argument("--des",nargs="+",default=[],type=__A )
_lowerCamelCase : Optional[int] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
_lowerCamelCase : Any = HfArgumentParser(__A )
self.argparsersEqual(__A,__A )
_lowerCamelCase : Union[str, Any] = parser.parse_args([] )
self.assertEqual(__A,Namespace(foo=__A,bar=__A,baz=__A,ces=[],des=[] ) )
_lowerCamelCase : Any = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__A,Namespace(foo=1_2,bar=3.14,baz="42",ces=["a", "b", "c"],des=[1, 2, 3] ) )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[Any] = HfArgumentParser(__A )
_lowerCamelCase : str = argparse.ArgumentParser()
expected.add_argument("--required_list",nargs="+",type=__A,required=__A )
expected.add_argument("--required_str",type=__A,required=__A )
expected.add_argument(
"--required_enum",type=make_choice_type_function(["titi", "toto"] ),choices=["titi", "toto"],required=__A,)
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : int = argparse.ArgumentParser()
expected.add_argument("--foo",type=__A,required=__A )
expected.add_argument(
"--required_enum",type=make_choice_type_function(["titi", "toto"] ),choices=["titi", "toto"],required=__A,)
expected.add_argument("--opt",type=__A,default=__A )
expected.add_argument("--baz",default="toto",type=__A,help="help message" )
expected.add_argument("--foo_str",nargs="+",default=["Hallo", "Bonjour", "Hello"],type=__A )
self.argparsersEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = HfArgumentParser(__A )
_lowerCamelCase : Optional[Any] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
_lowerCamelCase : Tuple = parser.parse_dict(__A )[0]
_lowerCamelCase : str = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = HfArgumentParser(__A )
_lowerCamelCase : str = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 4_2,
}
self.assertRaises(__A,parser.parse_dict,__A,allow_extra_keys=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = HfArgumentParser(__A )
_lowerCamelCase : List[str] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Dict = os.path.join(__A,"temp_json" )
os.mkdir(__A )
with open(temp_local_path + ".json","w+" ) as f:
json.dump(__A,__A )
_lowerCamelCase : Tuple = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
_lowerCamelCase : List[Any] = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Union[str, Any] = HfArgumentParser(__A )
_lowerCamelCase : List[str] = {
"foo": 1_2,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Tuple = os.path.join(__A,"temp_yaml" )
os.mkdir(__A )
with open(temp_local_path + ".yaml","w+" ) as f:
yaml.dump(__A,__A )
_lowerCamelCase : List[str] = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
_lowerCamelCase : Optional[int] = BasicExample(**__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = HfArgumentParser(__A )
self.assertIsNotNone(__A ) | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
from math import factorial
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : int,__A : List[Any] ):
_lowerCamelCase : Dict = real
if isinstance(__A,__A ):
_lowerCamelCase : str = [1] * rank
else:
_lowerCamelCase : Union[str, Any] = rank
def __repr__( self : Union[str, Any] ):
return (
f'{self.real}+'
f'{"+".join(str(__A )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real,__A )
def __add__( self : int,__A : str ):
if not isinstance(__A,__A ):
return Dual(self.real + other,self.duals )
_lowerCamelCase : int = self.duals.copy()
_lowerCamelCase : Tuple = other.duals.copy()
if len(__A ) > len(__A ):
o_dual.extend([1] * (len(__A ) - len(__A )) )
elif len(__A ) < len(__A ):
s_dual.extend([1] * (len(__A ) - len(__A )) )
_lowerCamelCase : Tuple = []
for i in range(len(__A ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real,__A )
lowerCAmelCase_ = __add__
def __sub__( self : Dict,__A : int ):
return self + other * -1
def __mul__( self : int,__A : Optional[int] ):
if not isinstance(__A,__A ):
_lowerCamelCase : str = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other,__A )
_lowerCamelCase : Any = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real,__A )
lowerCAmelCase_ = __mul__
def __truediv__( self : List[str],__A : List[Any] ):
if not isinstance(__A,__A ):
_lowerCamelCase : Optional[int] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other,__A )
raise ValueError
def __floordiv__( self : List[str],__A : Any ):
if not isinstance(__A,__A ):
_lowerCamelCase : Dict = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other,__A )
raise ValueError
def __pow__( self : Optional[Any],__A : Optional[Any] ):
if n < 0 or isinstance(__A,__A ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
_lowerCamelCase : int = self
for _ in range(n - 1 ):
x *= self
return x
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Any ):
"""simple docstring"""
if not callable(_lowerCAmelCase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(_lowerCAmelCase , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("differentiate() requires an int as input for order" )
_lowerCamelCase : Optional[Any] = Dual(_lowerCAmelCase , 1 )
_lowerCamelCase : List[str] = func(_lowerCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
return y**2 * y**4
print(differentiate(f, 9, 2))
| 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase_ : Any = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ : str = 'RegNetConfig'
# Base docstring
UpperCAmelCase_ : Tuple = 'facebook/regnet-y-040'
UpperCAmelCase_ : Optional[int] = [1, 1088, 7, 7]
# Image classification docstring
UpperCAmelCase_ : Dict = 'facebook/regnet-y-040'
UpperCAmelCase_ : Tuple = 'tabby, tabby cat'
UpperCAmelCase_ : Optional[Any] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any,__A : int,__A : int = 3,__A : int = 1,__A : int = 1,__A : Optional[str] = "relu",**__A : str,):
super().__init__(**__A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_lowerCamelCase : Optional[int] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_lowerCamelCase : str = tf.keras.layers.ConvaD(
filters=__A,kernel_size=__A,strides=__A,padding="VALID",groups=__A,use_bias=__A,name="convolution",)
_lowerCamelCase : List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5,momentum=0.9,name="normalization" )
_lowerCamelCase : Tuple = ACTaFN[activation] if activation is not None else tf.identity
def lowerCamelCase_ ( self : Any,__A : Optional[int] ):
_lowerCamelCase : int = self.convolution(self.padding(__A ) )
_lowerCamelCase : List[str] = self.normalization(__A )
_lowerCamelCase : Union[str, Any] = self.activation(__A )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Tuple,__A : RegNetConfig,**__A : List[Any] ):
super().__init__(**__A )
_lowerCamelCase : Tuple = config.num_channels
_lowerCamelCase : int = TFRegNetConvLayer(
out_channels=config.embedding_size,kernel_size=3,stride=2,activation=config.hidden_act,name="embedder",)
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : List[str] = shape_list(__A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_lowerCamelCase : str = tf.transpose(__A,perm=(0, 2, 3, 1) )
_lowerCamelCase : Optional[int] = self.embedder(__A )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any],__A : int,__A : int = 2,**__A : List[str] ):
super().__init__(**__A )
_lowerCamelCase : List[str] = tf.keras.layers.ConvaD(
filters=__A,kernel_size=1,strides=__A,use_bias=__A,name="convolution" )
_lowerCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1e-5,momentum=0.9,name="normalization" )
def lowerCamelCase_ ( self : str,__A : tf.Tensor,__A : bool = False ):
return self.normalization(self.convolution(__A ),training=__A )
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any],__A : int,__A : int,**__A : Optional[Any] ):
super().__init__(**__A )
_lowerCamelCase : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__A,name="pooler" )
_lowerCamelCase : Optional[Any] = [
tf.keras.layers.ConvaD(filters=__A,kernel_size=1,activation="relu",name="attention.0" ),
tf.keras.layers.ConvaD(filters=__A,kernel_size=1,activation="sigmoid",name="attention.2" ),
]
def lowerCamelCase_ ( self : Optional[int],__A : int ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_lowerCamelCase : Union[str, Any] = self.pooler(__A )
for layer_module in self.attention:
_lowerCamelCase : List[Any] = layer_module(__A )
_lowerCamelCase : List[str] = hidden_state * pooled
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any,__A : RegNetConfig,__A : int,__A : int,__A : int = 1,**__A : str ):
super().__init__(**__A )
_lowerCamelCase : Dict = in_channels != out_channels or stride != 1
_lowerCamelCase : Dict = max(1,out_channels // config.groups_width )
_lowerCamelCase : Any = (
TFRegNetShortCut(__A,stride=__A,name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear",name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_lowerCamelCase : Optional[int] = [
TFRegNetConvLayer(__A,kernel_size=1,activation=config.hidden_act,name="layer.0" ),
TFRegNetConvLayer(
__A,stride=__A,groups=__A,activation=config.hidden_act,name="layer.1" ),
TFRegNetConvLayer(__A,kernel_size=1,activation=__A,name="layer.2" ),
]
_lowerCamelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCamelCase_ ( self : Tuple,__A : Optional[int] ):
_lowerCamelCase : List[str] = hidden_state
for layer_module in self.layers:
_lowerCamelCase : int = layer_module(__A )
_lowerCamelCase : int = self.shortcut(__A )
hidden_state += residual
_lowerCamelCase : int = self.activation(__A )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any],__A : RegNetConfig,__A : int,__A : int,__A : int = 1,**__A : List[str] ):
super().__init__(**__A )
_lowerCamelCase : str = in_channels != out_channels or stride != 1
_lowerCamelCase : str = max(1,out_channels // config.groups_width )
_lowerCamelCase : Optional[Any] = (
TFRegNetShortCut(__A,stride=__A,name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear",name="shortcut" )
)
_lowerCamelCase : Dict = [
TFRegNetConvLayer(__A,kernel_size=1,activation=config.hidden_act,name="layer.0" ),
TFRegNetConvLayer(
__A,stride=__A,groups=__A,activation=config.hidden_act,name="layer.1" ),
TFRegNetSELayer(__A,reduced_channels=int(round(in_channels / 4 ) ),name="layer.2" ),
TFRegNetConvLayer(__A,kernel_size=1,activation=__A,name="layer.3" ),
]
_lowerCamelCase : Any = ACTaFN[config.hidden_act]
def lowerCamelCase_ ( self : str,__A : str ):
_lowerCamelCase : Any = hidden_state
for layer_module in self.layers:
_lowerCamelCase : Optional[int] = layer_module(__A )
_lowerCamelCase : Optional[Any] = self.shortcut(__A )
hidden_state += residual
_lowerCamelCase : Optional[int] = self.activation(__A )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str,__A : RegNetConfig,__A : int,__A : int,__A : int = 2,__A : int = 2,**__A : Dict ):
super().__init__(**__A )
_lowerCamelCase : List[str] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_lowerCamelCase : int = [
# downsampling is done in the first layer with stride of 2
layer(__A,__A,__A,stride=__A,name="layers.0" ),
*[layer(__A,__A,__A,name=f'layers.{i+1}' ) for i in range(depth - 1 )],
]
def lowerCamelCase_ ( self : Dict,__A : Dict ):
for layer_module in self.layers:
_lowerCamelCase : str = layer_module(__A )
return hidden_state
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str,__A : RegNetConfig,**__A : Union[str, Any] ):
super().__init__(**__A )
_lowerCamelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__A,config.embedding_size,config.hidden_sizes[0],stride=2 if config.downsample_in_first_stage else 1,depth=config.depths[0],name="stages.0",) )
_lowerCamelCase : str = zip(config.hidden_sizes,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__A,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__A,__A,__A,depth=__A,name=f'stages.{i+1}' ) )
def lowerCamelCase_ ( self : Optional[int],__A : tf.Tensor,__A : bool = False,__A : bool = True ):
_lowerCamelCase : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_lowerCamelCase : str = hidden_states + (hidden_state,)
_lowerCamelCase : List[Any] = stage_module(__A )
if output_hidden_states:
_lowerCamelCase : List[str] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__A,hidden_states=__A )
@keras_serializable
class UpperCAmelCase__ ( tf.keras.layers.Layer ):
lowerCAmelCase_ = RegNetConfig
def __init__( self : Dict,__A : List[str],**__A : Union[str, Any] ):
super().__init__(**__A )
_lowerCamelCase : List[Any] = config
_lowerCamelCase : str = TFRegNetEmbeddings(__A,name="embedder" )
_lowerCamelCase : str = TFRegNetEncoder(__A,name="encoder" )
_lowerCamelCase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__A,name="pooler" )
@unpack_inputs
def lowerCamelCase_ ( self : Union[str, Any],__A : tf.Tensor,__A : Optional[bool] = None,__A : Optional[bool] = None,__A : bool = False,):
_lowerCamelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : Union[str, Any] = self.embedder(__A,training=__A )
_lowerCamelCase : Any = self.encoder(
__A,output_hidden_states=__A,return_dict=__A,training=__A )
_lowerCamelCase : Union[str, Any] = encoder_outputs[0]
_lowerCamelCase : str = self.pooler(__A )
# Change to NCHW output format have uniformity in the modules
_lowerCamelCase : Optional[Any] = tf.transpose(__A,perm=(0, 3, 1, 2) )
_lowerCamelCase : str = tf.transpose(__A,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_lowerCamelCase : Tuple = tuple([tf.transpose(__A,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__A,pooler_output=__A,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = RegNetConfig
lowerCAmelCase_ = 'regnet'
lowerCAmelCase_ = 'pixel_values'
@property
def lowerCamelCase_ ( self : List[str] ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4),dtype=tf.floataa )}
UpperCAmelCase_ : str = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
UpperCAmelCase_ : int = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , A , )
class UpperCAmelCase__ ( A ):
def __init__( self : Optional[Any],__A : RegNetConfig,*__A : int,**__A : Optional[Any] ):
super().__init__(__A,*__A,**__A )
_lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(__A,name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,output_type=__A,config_class=_CONFIG_FOR_DOC,modality="vision",expected_output=_EXPECTED_OUTPUT_SHAPE,)
def lowerCamelCase_ ( self : str,__A : tf.Tensor,__A : Optional[bool] = None,__A : Optional[bool] = None,__A : Optional[int]=False,):
_lowerCamelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : str = self.regnet(
pixel_values=__A,output_hidden_states=__A,return_dict=__A,training=__A,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state,pooler_output=outputs.pooler_output,hidden_states=outputs.hidden_states,)
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , A , )
class UpperCAmelCase__ ( A , A ):
def __init__( self : Union[str, Any],__A : RegNetConfig,*__A : Optional[int],**__A : Tuple ):
super().__init__(__A,*__A,**__A )
_lowerCamelCase : List[Any] = config.num_labels
_lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(__A,name="regnet" )
# classification head
_lowerCamelCase : Dict = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels,name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT,output_type=__A,config_class=_CONFIG_FOR_DOC,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,)
def lowerCamelCase_ ( self : Optional[Any],__A : tf.Tensor = None,__A : tf.Tensor = None,__A : bool = None,__A : bool = None,__A : Union[str, Any]=False,):
_lowerCamelCase : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_lowerCamelCase : Tuple = self.regnet(
__A,output_hidden_states=__A,return_dict=__A,training=__A )
_lowerCamelCase : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
_lowerCamelCase : int = self.classifier[0](__A )
_lowerCamelCase : Optional[int] = self.classifier[1](__A )
_lowerCamelCase : List[str] = None if labels is None else self.hf_compute_loss(labels=__A,logits=__A )
if not return_dict:
_lowerCamelCase : List[str] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__A,logits=__A,hidden_states=outputs.hidden_states ) | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
UpperCAmelCase_ : Tuple = ['gpt2']
UpperCAmelCase_ : Optional[Any] = 'gpt2'
if is_tf_available():
class UpperCAmelCase__ ( tf.Module ):
def __init__( self : Any,__A : Dict ):
super().__init__()
_lowerCamelCase : List[str] = tokenizer
_lowerCamelCase : str = AutoConfig.from_pretrained(__A )
_lowerCamelCase : Optional[Any] = TFGPTaLMHeadModel.from_config(__A )
@tf.function(input_signature=(tf.TensorSpec((None,),tf.string,name="text" ),) )
def lowerCamelCase_ ( self : List[Any],__A : Tuple ):
_lowerCamelCase : int = self.tokenizer(__A )
_lowerCamelCase : str = tokenized["input_ids"].to_tensor()
_lowerCamelCase : Tuple = tf.cast(input_ids_dense > 0,tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
_lowerCamelCase : List[Any] = self.model(input_ids=__A,attention_mask=__A )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
super().setUp()
_lowerCamelCase : Any = [GPTaTokenizer.from_pretrained(__A ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
_lowerCamelCase : int = [TFGPTaTokenizer.from_pretrained(__A ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_lowerCamelCase : str = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: ไธ ไบ ไธ ไธไบไธ",
"And some much more rare Chinese: ้ฝ ๅ ้ฝๅ ",
"Je vais aussi รฉcrire en franรงais pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaฤ, ๊ผ",
]
_lowerCamelCase : int = list(zip(self.test_sentences,self.test_sentences[::-1] ) )
def lowerCamelCase_ ( self : Optional[int] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
_lowerCamelCase : str = tokenizer([test_inputs],return_tensors="tf" )
_lowerCamelCase : int = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
_lowerCamelCase : str = python_outputs[key].numpy()
_lowerCamelCase : List[Any] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__A,tf.intaa ) == tf_outputs_values ) )
@slow
def lowerCamelCase_ ( self : int ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Optional[Any] = tf.function(__A )
for test_inputs in self.test_sentences:
_lowerCamelCase : List[Any] = tf.constant(__A )
_lowerCamelCase : str = compiled_tokenizer(__A )
_lowerCamelCase : List[Any] = tf_tokenizer(__A )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : int = ModelToSave(tokenizer=__A )
_lowerCamelCase : List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCamelCase : Union[str, Any] = model.serving(__A ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCamelCase : Optional[int] = Path(__A ) / "saved.model"
tf.saved_model.save(__A,__A,signatures={"serving_default": model.serving} )
_lowerCamelCase : Tuple = tf.saved_model.load(__A )
_lowerCamelCase : Optional[Any] = loaded_model.signatures["serving_default"](__A )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCamelCase_ ( self : Any ):
for tf_tokenizer in self.tf_tokenizers:
_lowerCamelCase : Any = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCamelCase : str = tf_tokenizer(__A ) # Build model with some sample inputs
_lowerCamelCase : List[Any] = tf_tokenizer.get_config()
_lowerCamelCase : Union[str, Any] = TFGPTaTokenizer.from_config(__A )
_lowerCamelCase : int = model_from_config(__A )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCamelCase_ ( self : Tuple ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
_lowerCamelCase : Any = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
_lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([self.test_sentences[0]] )
_lowerCamelCase : Optional[Any] = tf_tokenizer(__A,max_length=__A )
_lowerCamelCase : Optional[Any] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
| 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def A_ ( _lowerCAmelCase : SplitDict ):
"""simple docstring"""
_lowerCamelCase : List[Any] = split_dict._to_yaml_list()
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = SplitDict._from_yaml_list(_lowerCAmelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_lowerCamelCase : Any = None
# the split name of split_dict takes over the name of the split info object
_lowerCamelCase : Optional[int] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=_lowerCAmelCase ), SplitInfo(dataset_name="my_dataset" )] )
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = 'โ'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = jnp.floataa
lowerCAmelCase_ = True
def lowerCamelCase_ ( self : Any ):
super().setup()
_lowerCamelCase : str = nn.Dense(5,dtype=self.dtype )
def __call__( self : Optional[int],*__A : Optional[int],**__A : Union[str, Any] ):
_lowerCamelCase : Optional[int] = super().__call__(*__A,**__A )
_lowerCamelCase : int = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = FlaxBigBirdForNaturalQuestionsModule
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ):
"""simple docstring"""
def cross_entropy(_lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=None ):
_lowerCamelCase : Optional[int] = logits.shape[-1]
_lowerCamelCase : Optional[Any] = (labels[..., None] == jnp.arange(_lowerCAmelCase )[None]).astype("f4" )
_lowerCamelCase : str = jax.nn.log_softmax(_lowerCAmelCase , axis=-1 )
_lowerCamelCase : int = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_lowerCamelCase : List[Any] = reduction(_lowerCAmelCase )
return loss
_lowerCamelCase : int = partial(_lowerCAmelCase , reduction=jnp.mean )
_lowerCamelCase : List[Any] = cross_entropy(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = cross_entropy(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Optional[int] = cross_entropy(_lowerCAmelCase , _lowerCAmelCase )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 'google/bigbird-roberta-base'
lowerCAmelCase_ = 3000
lowerCAmelCase_ = 10500
lowerCAmelCase_ = 128
lowerCAmelCase_ = 3
lowerCAmelCase_ = 1
lowerCAmelCase_ = 5
# tx_args
lowerCAmelCase_ = 3E-5
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = 20000
lowerCAmelCase_ = 0.0_0_9_5
lowerCAmelCase_ = 'bigbird-roberta-natural-questions'
lowerCAmelCase_ = 'training-expt'
lowerCAmelCase_ = 'data/nq-training.jsonl'
lowerCAmelCase_ = 'data/nq-validation.jsonl'
def lowerCamelCase_ ( self : Optional[int] ):
os.makedirs(self.base_dir,exist_ok=__A )
_lowerCamelCase : Union[str, Any] = os.path.join(self.base_dir,self.save_dir )
_lowerCamelCase : Any = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 4096 # no dynamic padding on TPUs
def __call__( self : Union[str, Any],__A : List[str] ):
_lowerCamelCase : Dict = self.collate_fn(__A )
_lowerCamelCase : int = jax.tree_util.tree_map(__A,__A )
return batch
def lowerCamelCase_ ( self : List[Any],__A : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = self.fetch_inputs(features["input_ids"] )
_lowerCamelCase : Dict = {
"input_ids": jnp.array(__A,dtype=jnp.intaa ),
"attention_mask": jnp.array(__A,dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"],dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"],dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"],dtype=jnp.intaa ),
}
return batch
def lowerCamelCase_ ( self : Union[str, Any],__A : list ):
_lowerCamelCase : str = [self._fetch_inputs(__A ) for ids in input_ids]
return zip(*__A )
def lowerCamelCase_ ( self : Tuple,__A : list ):
_lowerCamelCase : str = [1 for _ in range(len(__A ) )]
while len(__A ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=None ):
"""simple docstring"""
if seed is not None:
_lowerCamelCase : Union[str, Any] = dataset.shuffle(seed=_lowerCAmelCase )
for i in range(len(_lowerCAmelCase ) // batch_size ):
_lowerCamelCase : Any = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_lowerCAmelCase )
@partial(jax.pmap , axis_name="batch" )
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
"""simple docstring"""
def loss_fn(_lowerCAmelCase : str ):
_lowerCamelCase : List[Any] = model_inputs.pop("start_labels" )
_lowerCamelCase : Union[str, Any] = model_inputs.pop("end_labels" )
_lowerCamelCase : Optional[int] = model_inputs.pop("pooled_labels" )
_lowerCamelCase : List[str] = state.apply_fn(**_lowerCAmelCase , params=_lowerCAmelCase , dropout_rng=_lowerCAmelCase , train=_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = outputs
return state.loss_fn(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
_lowerCamelCase : List[Any] = jax.random.split(_lowerCAmelCase )
_lowerCamelCase : Tuple = jax.value_and_grad(_lowerCAmelCase )
_lowerCamelCase : int = grad_fn(state.params )
_lowerCamelCase : Optional[Any] = jax.lax.pmean({"loss": loss} , axis_name="batch" )
_lowerCamelCase : Union[str, Any] = jax.lax.pmean(_lowerCAmelCase , "batch" )
_lowerCamelCase : Union[str, Any] = state.apply_gradients(grads=_lowerCAmelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def A_ ( _lowerCAmelCase : int , **_lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = model_inputs.pop("start_labels" )
_lowerCamelCase : Any = model_inputs.pop("end_labels" )
_lowerCamelCase : Any = model_inputs.pop("pooled_labels" )
_lowerCamelCase : Optional[int] = state.apply_fn(**_lowerCAmelCase , params=state.params , train=_lowerCAmelCase )
_lowerCamelCase : Dict = outputs
_lowerCamelCase : List[str] = state.loss_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class UpperCAmelCase__ ( train_state.TrainState ):
lowerCAmelCase_ = struct.field(pytree_node=A )
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
def lowerCamelCase_ ( self : Tuple,__A : List[Any],__A : int,__A : List[str],__A : Tuple=None ):
_lowerCamelCase : Optional[Any] = model.params
_lowerCamelCase : List[Any] = TrainState.create(
apply_fn=model.__call__,params=__A,tx=__A,loss_fn=__A,)
if ckpt_dir is not None:
_lowerCamelCase : List[Any] = restore_checkpoint(__A,__A )
_lowerCamelCase : List[str] = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
_lowerCamelCase : Any = build_tx(**__A )
_lowerCamelCase : int = train_state.TrainState(
step=__A,apply_fn=model.__call__,params=__A,tx=__A,opt_state=__A,)
_lowerCamelCase : int = args
_lowerCamelCase : str = data_collator
_lowerCamelCase : Tuple = lr
_lowerCamelCase : Tuple = params
_lowerCamelCase : Tuple = jax_utils.replicate(__A )
return state
def lowerCamelCase_ ( self : List[Any],__A : int,__A : Dict,__A : Optional[Any] ):
_lowerCamelCase : str = self.args
_lowerCamelCase : Union[str, Any] = len(__A ) // args.batch_size
_lowerCamelCase : Dict = jax.random.PRNGKey(0 )
_lowerCamelCase : Optional[int] = jax.random.split(__A,jax.device_count() )
for epoch in range(args.max_epochs ):
_lowerCamelCase : List[str] = jnp.array(0,dtype=jnp.floataa )
_lowerCamelCase : Any = get_batched_dataset(__A,args.batch_size,seed=__A )
_lowerCamelCase : Union[str, Any] = 0
for batch in tqdm(__A,total=__A,desc=f'Running EPOCH-{epoch}' ):
_lowerCamelCase : Optional[Any] = self.data_collator(__A )
_lowerCamelCase : Optional[Any] = self.train_step_fn(__A,__A,**__A )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
_lowerCamelCase : Dict = jax_utils.unreplicate(state.step )
_lowerCamelCase : Optional[Any] = running_loss.item() / i
_lowerCamelCase : Tuple = self.scheduler_fn(state_step - 1 )
_lowerCamelCase : Any = self.evaluate(__A,__A )
_lowerCamelCase : str = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(__A ) )
self.logger.log(__A,commit=__A )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}',state=__A )
def lowerCamelCase_ ( self : Union[str, Any],__A : int,__A : Tuple ):
_lowerCamelCase : List[Any] = get_batched_dataset(__A,self.args.batch_size )
_lowerCamelCase : str = len(__A ) // self.args.batch_size
_lowerCamelCase : Optional[Any] = jnp.array(0,dtype=jnp.floataa )
_lowerCamelCase : Dict = 0
for batch in tqdm(__A,total=__A,desc="Evaluating ... " ):
_lowerCamelCase : str = self.data_collator(__A )
_lowerCamelCase : Dict = self.val_step_fn(__A,**__A )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : Union[str, Any] ):
_lowerCamelCase : Any = jax_utils.unreplicate(__A )
print(f'SAVING CHECKPOINT IN {save_dir}',end=" ... " )
self.model_save_fn(__A,params=state.params )
with open(os.path.join(__A,"opt_state.msgpack" ),"wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args,os.path.join(__A,"args.joblib" ) )
joblib.dump(self.data_collator,os.path.join(__A,"data_collator.joblib" ) )
with open(os.path.join(__A,"training_state.json" ),"w" ) as f:
json.dump({"step": state.step.item()},__A )
print("DONE" )
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=" ... " )
with open(os.path.join(_lowerCAmelCase , "flax_model.msgpack" ) , "rb" ) as f:
_lowerCamelCase : str = from_bytes(state.params , f.read() )
with open(os.path.join(_lowerCAmelCase , "opt_state.msgpack" ) , "rb" ) as f:
_lowerCamelCase : List[Any] = from_bytes(state.opt_state , f.read() )
_lowerCamelCase : Optional[int] = joblib.load(os.path.join(_lowerCAmelCase , "args.joblib" ) )
_lowerCamelCase : List[Any] = joblib.load(os.path.join(_lowerCAmelCase , "data_collator.joblib" ) )
with open(os.path.join(_lowerCAmelCase , "training_state.json" ) , "r" ) as f:
_lowerCamelCase : str = json.load(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = training_state["step"]
print("DONE" )
return params, opt_state, step, args, data_collator
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[str] = num_train_steps - warmup_steps
_lowerCamelCase : Dict = optax.linear_schedule(init_value=_lowerCAmelCase , end_value=_lowerCAmelCase , transition_steps=_lowerCAmelCase )
_lowerCamelCase : Dict = optax.linear_schedule(init_value=_lowerCAmelCase , end_value=1E-7 , transition_steps=_lowerCAmelCase )
_lowerCamelCase : List[Any] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
def weight_decay_mask(_lowerCAmelCase : List[Any] ):
_lowerCamelCase : Tuple = traverse_util.flatten_dict(_lowerCAmelCase )
_lowerCamelCase : Tuple = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
return traverse_util.unflatten_dict(_lowerCAmelCase )
_lowerCamelCase : int = scheduler_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = optax.adamw(learning_rate=_lowerCAmelCase , weight_decay=_lowerCAmelCase , mask=_lowerCAmelCase )
return tx, lr | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
_lowerCamelCase : Union[str, Any] = {" ", "โ", "โ", "โฏ", "โ
", "ใ", "โ", " ", "โ", "โ", "๏ฟผ", "ย"}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
'''simple docstring'''
import requests
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Tuple = {"Content-Type": "application/json"}
_lowerCamelCase : Optional[Any] = requests.post(_lowerCAmelCase , json={"text": message_body} , headers=_lowerCAmelCase )
if response.status_code != 200:
_lowerCamelCase : str = (
"Request to slack returned an error "
F'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(_lowerCAmelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
| 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'deta'
lowerCAmelCase_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[Any],__A : Union[str, Any]=None,__A : int=9_0_0,__A : Optional[int]=2_0_4_8,__A : Any=6,__A : str=2_0_4_8,__A : List[Any]=8,__A : Tuple=6,__A : Optional[Any]=1_0_2_4,__A : Optional[int]=8,__A : Union[str, Any]=0.0,__A : Optional[Any]=True,__A : Any="relu",__A : Dict=2_5_6,__A : int=0.1,__A : Dict=0.0,__A : Optional[int]=0.0,__A : Dict=0.02,__A : List[Any]=1.0,__A : Tuple=True,__A : int=False,__A : Optional[int]="sine",__A : Optional[int]=5,__A : List[Any]=4,__A : int=4,__A : Optional[Any]=True,__A : Optional[Any]=3_0_0,__A : Any=True,__A : Tuple=True,__A : Optional[Any]=1,__A : List[Any]=5,__A : List[str]=2,__A : Tuple=1,__A : int=1,__A : Tuple=5,__A : Optional[int]=2,__A : Optional[Any]=0.1,__A : Dict=0.25,**__A : List[str],):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_lowerCamelCase : Dict = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__A,__A ):
_lowerCamelCase : Dict = backbone_config.pop("model_type" )
_lowerCamelCase : List[Any] = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Optional[Any] = config_class.from_dict(__A )
_lowerCamelCase : List[str] = backbone_config
_lowerCamelCase : Optional[int] = num_queries
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Any = d_model
_lowerCamelCase : Any = encoder_ffn_dim
_lowerCamelCase : Optional[int] = encoder_layers
_lowerCamelCase : Tuple = encoder_attention_heads
_lowerCamelCase : List[Any] = decoder_ffn_dim
_lowerCamelCase : List[Any] = decoder_layers
_lowerCamelCase : List[str] = decoder_attention_heads
_lowerCamelCase : Tuple = dropout
_lowerCamelCase : Optional[int] = attention_dropout
_lowerCamelCase : Tuple = activation_dropout
_lowerCamelCase : Optional[int] = activation_function
_lowerCamelCase : int = init_std
_lowerCamelCase : Optional[Any] = init_xavier_std
_lowerCamelCase : Tuple = encoder_layerdrop
_lowerCamelCase : Tuple = auxiliary_loss
_lowerCamelCase : Optional[Any] = position_embedding_type
# deformable attributes
_lowerCamelCase : List[str] = num_feature_levels
_lowerCamelCase : Optional[Any] = encoder_n_points
_lowerCamelCase : str = decoder_n_points
_lowerCamelCase : Any = two_stage
_lowerCamelCase : List[Any] = two_stage_num_proposals
_lowerCamelCase : int = with_box_refine
_lowerCamelCase : Dict = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_lowerCamelCase : Optional[Any] = class_cost
_lowerCamelCase : List[Any] = bbox_cost
_lowerCamelCase : List[str] = giou_cost
# Loss coefficients
_lowerCamelCase : str = mask_loss_coefficient
_lowerCamelCase : Union[str, Any] = dice_loss_coefficient
_lowerCamelCase : Optional[int] = bbox_loss_coefficient
_lowerCamelCase : List[Any] = giou_loss_coefficient
_lowerCamelCase : Dict = eos_coefficient
_lowerCamelCase : int = focal_alpha
super().__init__(is_encoder_decoder=__A,**__A )
@property
def lowerCamelCase_ ( self : str ):
return self.encoder_attention_heads
@property
def lowerCamelCase_ ( self : str ):
return self.d_model
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : str = copy.deepcopy(self.__dict__ )
_lowerCamelCase : List[Any] = self.backbone_config.to_dict()
_lowerCamelCase : List[str] = self.__class__.model_type
return output | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=0.9_9_9 , _lowerCAmelCase : Any="cosine" , ) -> Optional[Any]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCAmelCase : List[Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCAmelCase : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
_lowerCamelCase : Tuple = []
for i in range(_lowerCAmelCase ):
_lowerCamelCase : int = i / num_diffusion_timesteps
_lowerCamelCase : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCAmelCase ) / alpha_bar_fn(_lowerCAmelCase ) , _lowerCAmelCase ) )
return torch.tensor(_lowerCAmelCase , dtype=torch.floataa )
class UpperCAmelCase__ ( A , A ):
@register_to_config
def __init__( self : Any,__A : int = 1_0_0_0,__A : str = "fixed_small_log",__A : bool = True,__A : Optional[float] = 1.0,__A : str = "epsilon",__A : str = "squaredcos_cap_v2",):
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
_lowerCamelCase : Union[str, Any] = betas_for_alpha_bar(__A )
_lowerCamelCase : str = 1.0 - self.betas
_lowerCamelCase : Any = torch.cumprod(self.alphas,dim=0 )
_lowerCamelCase : str = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
_lowerCamelCase : Optional[Any] = 1.0
# setable values
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : str = torch.from_numpy(np.arange(0,__A )[::-1].copy() )
_lowerCamelCase : Optional[int] = variance_type
def lowerCamelCase_ ( self : Union[str, Any],__A : torch.FloatTensor,__A : Optional[int] = None ):
return sample
def lowerCamelCase_ ( self : List[Any],__A : int,__A : Union[str, torch.device] = None ):
_lowerCamelCase : Dict = num_inference_steps
_lowerCamelCase : List[str] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
_lowerCamelCase : Union[str, Any] = (np.arange(0,__A ) * step_ratio).round()[::-1].copy().astype(np.intaa )
_lowerCamelCase : List[str] = torch.from_numpy(__A ).to(__A )
def lowerCamelCase_ ( self : str,__A : Optional[Any],__A : str=None,__A : str=None,__A : Union[str, Any]=None ):
if prev_timestep is None:
_lowerCamelCase : Optional[int] = t - 1
_lowerCamelCase : str = self.alphas_cumprod[t]
_lowerCamelCase : Optional[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : int = 1 - alpha_prod_t
_lowerCamelCase : Union[str, Any] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Any = self.betas[t]
else:
_lowerCamelCase : Optional[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance ฮฒt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_lowerCamelCase : str = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
_lowerCamelCase : Tuple = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
_lowerCamelCase : Tuple = torch.log(torch.clamp(__A,min=1e-20 ) )
_lowerCamelCase : Optional[Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
_lowerCamelCase : Optional[int] = variance.log()
_lowerCamelCase : List[Any] = beta.log()
_lowerCamelCase : Optional[Any] = (predicted_variance + 1) / 2
_lowerCamelCase : List[Any] = frac * max_log + (1 - frac) * min_log
return variance
def lowerCamelCase_ ( self : int,__A : torch.FloatTensor,__A : int,__A : torch.FloatTensor,__A : Optional[int] = None,__A : Optional[Any]=None,__A : bool = True,):
_lowerCamelCase : str = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
_lowerCamelCase : Optional[Any] = torch.split(__A,sample.shape[1],dim=1 )
else:
_lowerCamelCase : int = None
# 1. compute alphas, betas
if prev_timestep is None:
_lowerCamelCase : Optional[Any] = t - 1
_lowerCamelCase : str = self.alphas_cumprod[t]
_lowerCamelCase : Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
_lowerCamelCase : Any = 1 - alpha_prod_t
_lowerCamelCase : Optional[int] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
_lowerCamelCase : Optional[Any] = self.betas[t]
_lowerCamelCase : str = self.alphas[t]
else:
_lowerCamelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
_lowerCamelCase : Dict = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_lowerCamelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_lowerCamelCase : str = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
_lowerCamelCase : int = torch.clamp(
__A,-self.config.clip_sample_range,self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : Tuple = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
_lowerCamelCase : Optional[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample ยต_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_lowerCamelCase : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_lowerCamelCase : List[Any] = 0
if t > 0:
_lowerCamelCase : Tuple = randn_tensor(
model_output.shape,dtype=model_output.dtype,generator=__A,device=model_output.device )
_lowerCamelCase : Optional[Any] = self._get_variance(
__A,predicted_variance=__A,prev_timestep=__A,)
if self.variance_type == "fixed_small_log":
_lowerCamelCase : Union[str, Any] = variance
elif self.variance_type == "learned_range":
_lowerCamelCase : Any = (0.5 * variance).exp()
else:
raise ValueError(
f'variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'
" for the UnCLIPScheduler." )
_lowerCamelCase : Tuple = variance * variance_noise
_lowerCamelCase : List[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__A,pred_original_sample=__A )
def lowerCamelCase_ ( self : Optional[Any],__A : torch.FloatTensor,__A : torch.FloatTensor,__A : torch.IntTensor,):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
_lowerCamelCase : Union[str, Any] = self.alphas_cumprod.to(device=original_samples.device,dtype=original_samples.dtype )
_lowerCamelCase : str = timesteps.to(original_samples.device )
_lowerCamelCase : str = alphas_cumprod[timesteps] ** 0.5
_lowerCamelCase : Dict = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : str = sqrt_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : List[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
_lowerCamelCase : Optional[int] = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
_lowerCamelCase : Optional[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
_lowerCamelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for i in range(len(_lowerCAmelCase ) - 1 , 0 , -1 ):
_lowerCamelCase : Tuple = False
for j in range(_lowerCAmelCase , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
_lowerCamelCase : Dict = unsorted[j - 1], unsorted[j]
_lowerCamelCase : str = True
for j in range(_lowerCAmelCase ):
if unsorted[j] > unsorted[j + 1]:
_lowerCamelCase : Dict = unsorted[j + 1], unsorted[j]
_lowerCamelCase : Optional[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : List[Any] = [int(item) for item in user_input.split(',')]
print(f'''{cocktail_shaker_sort(unsorted) = }''') | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCAmelCase__ ( nn.Module ):
def __init__( self : Tuple,__A : int = 1_6,__A : int = 8_8,__A : Optional[int] = None,__A : int = 1,__A : float = 0.0,__A : int = 3_2,__A : Optional[int] = None,__A : bool = False,__A : Optional[int] = None,__A : Optional[int] = None,__A : str = "geglu",__A : Optional[int] = None,):
super().__init__()
_lowerCamelCase : List[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__A,attention_head_dim=__A,in_channels=__A,num_layers=__A,dropout=__A,norm_num_groups=__A,cross_attention_dim=__A,attention_bias=__A,sample_size=__A,num_vector_embeds=__A,activation_fn=__A,num_embeds_ada_norm=__A,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_lowerCamelCase : Dict = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_lowerCamelCase : Dict = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_lowerCamelCase : int = [1, 0]
def lowerCamelCase_ ( self : List[Any],__A : Dict,__A : List[str],__A : Dict=None,__A : Dict=None,__A : Tuple=None,__A : bool = True,):
_lowerCamelCase : Optional[Any] = hidden_states
_lowerCamelCase : Optional[Any] = []
_lowerCamelCase : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_lowerCamelCase : Optional[int] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_lowerCamelCase : List[str] = self.transformer_index_for_condition[i]
_lowerCamelCase : Optional[int] = self.transformers[transformer_index](
__A,encoder_hidden_states=__A,timestep=__A,cross_attention_kwargs=__A,return_dict=__A,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_lowerCamelCase : List[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_lowerCamelCase : Dict = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__A ) | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : int,__A : Union[str, Any]=3,__A : Optional[Any]=3_2,__A : int=3,__A : Optional[int]=1_0,__A : str=[8, 1_6, 3_2, 6_4],__A : Optional[Any]=[1, 1, 2, 1],__A : Any=True,__A : str=True,__A : Union[str, Any]="relu",__A : List[Any]=3,__A : Union[str, Any]=None,__A : Union[str, Any]=["stage2", "stage3", "stage4"],__A : Tuple=[2, 3, 4],__A : Any=1,):
_lowerCamelCase : Dict = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : List[str] = image_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Optional[Any] = embeddings_size
_lowerCamelCase : str = hidden_sizes
_lowerCamelCase : str = depths
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Dict = num_labels
_lowerCamelCase : str = scope
_lowerCamelCase : Tuple = len(__A )
_lowerCamelCase : Optional[int] = out_features
_lowerCamelCase : Any = out_indices
_lowerCamelCase : Union[str, Any] = num_groups
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : Tuple = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size],self.num_labels )
_lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : List[Any] ):
return BitConfig(
num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,out_features=self.out_features,out_indices=self.out_indices,num_groups=self.num_groups,)
def lowerCamelCase_ ( self : List[Any],__A : str,__A : List[str],__A : Tuple ):
_lowerCamelCase : Dict = BitModel(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Tuple = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),)
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any],__A : Dict,__A : Union[str, Any] ):
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : Any = BitForImageClassification(__A )
model.to(__A )
model.eval()
_lowerCamelCase : Dict = model(__A,labels=__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Tuple,__A : List[str],__A : Union[str, Any],__A : Optional[Any] ):
_lowerCamelCase : str = BitBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Optional[Any] = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ),len(config.out_features ) )
self.parent.assertListEqual(model.channels,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Dict = BitBackbone(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : Tuple = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ),1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ),1 )
self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase : int = config_and_inputs
_lowerCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCAmelCase_ = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = BitModelTester(self )
_lowerCamelCase : int = ConfigTester(self,config_class=__A,has_text_modality=__A )
def lowerCamelCase_ ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : int ):
return
@unittest.skip(reason="Bit does not output attentions" )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def lowerCamelCase_ ( self : Optional[int] ):
pass
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[Any] = model_class(__A )
_lowerCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Dict = [*signature.parameters.keys()]
_lowerCamelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(config=__A )
for name, module in model.named_modules():
if isinstance(__A,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ),msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
self.assertTrue(
torch.all(module.bias == 0 ),msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
def lowerCamelCase_ ( self : Dict ):
def check_hidden_states_output(__A : Dict,__A : Union[str, Any],__A : Optional[Any] ):
_lowerCamelCase : str = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : str = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(__A ),expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],)
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : str = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCamelCase : Tuple = layer_type
_lowerCamelCase : Tuple = True
check_hidden_states_output(__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : Union[str, Any] = True
check_hidden_states_output(__A,__A,__A )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def lowerCamelCase_ ( self : Any ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int = BitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Dict = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__A )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : List[Any] = prepare_img()
_lowerCamelCase : int = image_processor(images=__A,return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**__A )
# verify the logits
_lowerCamelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : Optional[int] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__A,atol=1e-4 ) )
@require_torch
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (BitBackbone,) if is_torch_available() else ()
lowerCAmelCase_ = BitConfig
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Tuple = BitModelTester(self ) | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : Union[str, Any],__A : Optional[Any]=1_3,__A : Optional[Any]=3_2,__A : Union[str, Any]=3,__A : List[Any]=4,__A : List[str]=[1_0, 2_0, 3_0, 4_0],__A : Tuple=[2, 2, 3, 2],__A : int=True,__A : List[str]=True,__A : Dict=3_7,__A : Optional[int]="gelu",__A : Dict=1_0,__A : Optional[Any]=0.02,__A : Any=["stage2", "stage3", "stage4"],__A : List[Any]=3,__A : Any=None,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Dict = image_size
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : Tuple = num_stages
_lowerCamelCase : Dict = hidden_sizes
_lowerCamelCase : Dict = depths
_lowerCamelCase : Dict = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Dict = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : List[Any] = out_features
_lowerCamelCase : int = num_labels
_lowerCamelCase : Union[str, Any] = scope
_lowerCamelCase : str = num_stages
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : List[str] = None
if self.use_labels:
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Dict ):
return ConvNextConfig(
num_channels=self.num_channels,num_stages=self.num_stages,hidden_sizes=self.hidden_sizes,depths=self.depths,is_training=self.is_training,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,out_features=self.out_features,)
def lowerCamelCase_ ( self : List[str] ):
return UperNetConfig(
backbone_config=self.get_backbone_config(),hidden_size=5_1_2,pool_scales=[1, 2, 3, 6],use_auxiliary_head=__A,auxiliary_loss_weight=0.4,auxiliary_in_channels=4_0,auxiliary_channels=2_5_6,auxiliary_num_convs=1,auxiliary_concat_input=__A,loss_ignore_index=2_5_5,num_labels=self.num_labels,)
def lowerCamelCase_ ( self : Optional[int],__A : List[Any],__A : str,__A : List[Any] ):
_lowerCamelCase : Union[str, Any] = UperNetForSemanticSegmentation(config=__A )
model.to(__A )
model.eval()
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : Union[str, Any] = config_and_inputs
_lowerCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCAmelCase_ = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = UperNetModelTester(self )
_lowerCamelCase : str = ConfigTester(self,config_class=__A,has_text_modality=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : str ):
return
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__A )
_lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : str = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1],__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__A )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason="UperNet does not have a base model" )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
def lowerCamelCase_ ( self : int ):
def check_hidden_states_output(__A : int,__A : List[str],__A : Optional[int] ):
_lowerCamelCase : Any = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase : Any = self.model_tester.num_stages
self.assertEqual(len(__A ),expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],)
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[int] = True
check_hidden_states_output(__A,__A,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase : List[str] = True
check_hidden_states_output(__A,__A,__A )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Optional[int] = _config_zero_init(__A )
_lowerCamelCase : int = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(config=__A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item(),[0.0, 1.0],msg=f'Parameter {name} of model {model_class} seems not properly initialized',)
@unittest.skip(reason="UperNet does not have tied weights" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@slow
def lowerCamelCase_ ( self : Any ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : int = UperNetForSemanticSegmentation.from_pretrained(__A )
self.assertIsNotNone(__A )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
_lowerCamelCase : Any = Image.open(_lowerCAmelCase ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
_lowerCamelCase : List[Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(__A )
_lowerCamelCase : Optional[int] = prepare_img()
_lowerCamelCase : Dict = processor(images=__A,return_tensors="pt" ).to(__A )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**__A )
_lowerCamelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : Optional[Any] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3],__A,atol=1e-4 ) )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : int = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
_lowerCamelCase : List[Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(__A )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Optional[Any] = processor(images=__A,return_tensors="pt" ).to(__A )
with torch.no_grad():
_lowerCamelCase : Any = model(**__A )
_lowerCamelCase : str = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape,__A )
_lowerCamelCase : Dict = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3],__A,atol=1e-4 ) ) | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
print()
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
UpperCAmelCase_ : str = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
F'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
F' reinstalling {pkg}.' )
if not ops[op](version.parse(_lowerCAmelCase ) , version.parse(_lowerCAmelCase ) ):
raise ImportError(
F'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' )
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = F'\n{hint}' if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$" , _lowerCAmelCase ):
_lowerCamelCase : List[Any] = requirement, None, None
else:
_lowerCamelCase : Optional[int] = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , _lowerCAmelCase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
F' got {requirement}' )
_lowerCamelCase : Tuple = match[0]
_lowerCamelCase : Union[str, Any] = want_full.split("," ) # there could be multiple requirements
_lowerCamelCase : List[str] = {}
for w in want_range:
_lowerCamelCase : int = re.findall(r"^([\s!=<>]{1,2})(.+)" , _lowerCAmelCase )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
F' but got {requirement}' )
_lowerCamelCase : Any = match[0]
_lowerCamelCase : Union[str, Any] = want_ver
if op not in ops:
raise ValueError(F'{requirement}: need one of {list(ops.keys() )}, but got {op}' )
# special case
if pkg == "python":
_lowerCamelCase : Union[str, Any] = ".".join([str(_lowerCAmelCase ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return
# check if any version is installed
try:
_lowerCamelCase : Dict = importlib.metadata.version(_lowerCAmelCase )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'The \'{requirement}\' distribution was not found and is required by this application. {hint}' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Dict = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(_lowerCAmelCase , _lowerCAmelCase ) | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : int ):
_lowerCamelCase : dict[str, TrieNode] = {} # Mapping from char to TrieNode
_lowerCamelCase : Optional[int] = False
def lowerCamelCase_ ( self : str,__A : list[str] ):
for word in words:
self.insert(__A )
def lowerCamelCase_ ( self : Tuple,__A : str ):
_lowerCamelCase : List[Any] = self
for char in word:
if char not in curr.nodes:
_lowerCamelCase : Tuple = TrieNode()
_lowerCamelCase : Any = curr.nodes[char]
_lowerCamelCase : List[str] = True
def lowerCamelCase_ ( self : Any,__A : str ):
_lowerCamelCase : Dict = self
for char in word:
if char not in curr.nodes:
return False
_lowerCamelCase : Optional[int] = curr.nodes[char]
return curr.is_leaf
def lowerCamelCase_ ( self : Any,__A : str ):
def _delete(__A : TrieNode,__A : str,__A : int ) -> bool:
if index == len(__A ):
# If word does not exist
if not curr.is_leaf:
return False
_lowerCamelCase : int = False
return len(curr.nodes ) == 0
_lowerCamelCase : Dict = word[index]
_lowerCamelCase : Optional[Any] = curr.nodes.get(__A )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_lowerCamelCase : List[Any] = _delete(__A,__A,index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self,__A,0 )
def A_ ( _lowerCAmelCase : TrieNode , _lowerCAmelCase : str ):
"""simple docstring"""
if node.is_leaf:
print(_lowerCAmelCase , end=" " )
for key, value in node.nodes.items():
print_words(_lowerCAmelCase , word + key )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "banana bananas bandana band apple all beast".split()
_lowerCamelCase : str = TrieNode()
root.insert_many(_lowerCAmelCase )
# print_words(root, "")
assert all(root.find(_lowerCAmelCase ) for word in words )
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : bool ):
"""simple docstring"""
print(str(_lowerCAmelCase ) , "works!" if passes else "doesn't work :(" )
def A_ ( ):
"""simple docstring"""
assert test_trie()
def A_ ( ):
"""simple docstring"""
print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main() | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
UpperCAmelCase_ : Tuple = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class UpperCAmelCase__ :
def __init__( self : List[str],__A : int = 1_4 ):
if group not in primes:
raise ValueError("Unsupported Group" )
_lowerCamelCase : Union[str, Any] = primes[group]["prime"]
_lowerCamelCase : Tuple = primes[group]["generator"]
_lowerCamelCase : Any = int(hexlify(urandom(3_2 ) ),base=1_6 )
def lowerCamelCase_ ( self : str ):
return hex(self.__private_key )[2:]
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[int] = pow(self.generator,self.__private_key,self.prime )
return hex(__A )[2:]
def lowerCamelCase_ ( self : List[str],__A : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(__A,(self.prime - 1) // 2,self.prime ) == 1
)
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Optional[int] = int(__A,base=1_6 )
if not self.is_valid_public_key(__A ):
raise ValueError("Invalid public key" )
_lowerCamelCase : Tuple = pow(__A,self.__private_key,self.prime )
return shaaaa(str(__A ).encode() ).hexdigest()
@staticmethod
def lowerCamelCase_ ( __A : int,__A : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(__A,(prime - 1) // 2,__A ) == 1
)
@staticmethod
def lowerCamelCase_ ( __A : str,__A : str,__A : int = 1_4 ):
_lowerCamelCase : Optional[Any] = int(__A,base=1_6 )
_lowerCamelCase : Dict = int(__A,base=1_6 )
_lowerCamelCase : str = primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(__A,__A ):
raise ValueError("Invalid public key" )
_lowerCamelCase : Dict = pow(__A,__A,__A )
return shaaaa(str(__A ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod() | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : Any,*__A : Any,**__A : int ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead.",__A,)
super().__init__(*__A,**__A ) | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCAmelCase__ ( A ):
def __init__( self : Optional[int],__A : Union[str, Any]=0.01,__A : str=1_0_0_0 ):
_lowerCamelCase : Any = p_stop
_lowerCamelCase : Tuple = max_length
def __iter__( self : Tuple ):
_lowerCamelCase : int = 0
_lowerCamelCase : str = False
while not stop and count < self.max_length:
yield count
count += 1
_lowerCamelCase : List[Any] = random.random() < self.p_stop
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : str,__A : List[str],__A : List[str],__A : Union[str, Any]=False,__A : Dict=True ):
_lowerCamelCase : Tuple = [
BatchSamplerShard(__A,2,__A,split_batches=__A,even_batches=__A )
for i in range(2 )
]
_lowerCamelCase : Dict = [list(__A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__A ) for shard in batch_sampler_shards],[len(__A ) for e in expected] )
self.assertListEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
# Check the shards when the dataset is a round multiple of total batch size.
_lowerCamelCase : int = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Any = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : int = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : str = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Any = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Any = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Tuple = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__A,__A )
_lowerCamelCase : Tuple = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Dict = [[], []]
self.check_batch_sampler_shards(__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
# Check the shards when the dataset is a round multiple of batch size.
_lowerCamelCase : Any = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : int = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : Any = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : List[Any] = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Tuple = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : List[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : int = [[], []]
self.check_batch_sampler_shards(__A,__A,split_batches=__A )
def lowerCamelCase_ ( self : Dict ):
# Check the shards when the dataset is a round multiple of total batch size.
_lowerCamelCase : List[Any] = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : str = BatchSampler(range(2_4 ),batch_size=3,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCamelCase : List[str] = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Tuple = BatchSampler(range(2_1 ),batch_size=3,drop_last=__A )
_lowerCamelCase : int = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Optional[int] = BatchSampler(range(2_2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : List[str] = BatchSampler(range(2_0 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : Dict = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : Optional[int] = [[[0, 1]], []]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ),batch_size=3,drop_last=__A )
_lowerCamelCase : List[str] = [[], []]
self.check_batch_sampler_shards(__A,__A,even_batches=__A )
def lowerCamelCase_ ( self : Tuple ):
# Check the shards when the dataset is a round multiple of batch size.
_lowerCamelCase : Any = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
_lowerCamelCase : Union[str, Any] = BatchSampler(range(2_4 ),batch_size=4,drop_last=__A )
# Expected shouldn't change
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCamelCase : Optional[Any] = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
_lowerCamelCase : List[Any] = BatchSampler(range(2_2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Any = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCamelCase : str = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
_lowerCamelCase : Optional[int] = BatchSampler(range(2_1 ),batch_size=4,drop_last=__A )
_lowerCamelCase : str = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
# Check the shards when the dataset is very small.
_lowerCamelCase : int = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
_lowerCamelCase : Optional[Any] = BatchSampler(range(2 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Union[str, Any] = [[], []]
self.check_batch_sampler_shards(__A,__A,split_batches=__A,even_batches=__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
_lowerCamelCase : List[str] = [BatchSamplerShard(__A,2,__A,even_batches=__A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ),3 )
self.assertEqual(len(batch_sampler_shards[1] ),2 )
self.assertListEqual(list(batch_sampler_shards[0] ),[[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ),[[3, 4], [9, 1_0, 1_1]] )
def lowerCamelCase_ ( self : int,__A : List[str],__A : int,__A : List[str],__A : Optional[int]=False,__A : List[str]=2,__A : Optional[Any]=False ):
random.seed(__A )
_lowerCamelCase : str = list(__A )
_lowerCamelCase : List[str] = [
IterableDatasetShard(
__A,batch_size=__A,drop_last=__A,num_processes=__A,process_index=__A,split_batches=__A,)
for i in range(__A )
]
_lowerCamelCase : Dict = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__A )
iterable_dataset_lists.append(list(__A ) )
_lowerCamelCase : Union[str, Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowerCamelCase : Any = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__A ),len(__A ) )
self.assertTrue(len(__A ) % shard_batch_size == 0 )
_lowerCamelCase : Optional[int] = []
for idx in range(0,len(__A ),__A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__A ) < len(__A ):
reference += reference
self.assertListEqual(__A,reference[: len(__A )] )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = 4_2
_lowerCamelCase : Optional[int] = RandomIterableDataset()
self.check_iterable_dataset_shards(__A,__A,batch_size=4,drop_last=__A,split_batches=__A )
self.check_iterable_dataset_shards(__A,__A,batch_size=4,drop_last=__A,split_batches=__A )
self.check_iterable_dataset_shards(__A,__A,batch_size=4,drop_last=__A,split_batches=__A )
self.check_iterable_dataset_shards(__A,__A,batch_size=4,drop_last=__A,split_batches=__A )
# Edge case with a very small dataset
_lowerCamelCase : Tuple = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__A,__A,batch_size=4,drop_last=__A,split_batches=__A )
self.check_iterable_dataset_shards(__A,__A,batch_size=4,drop_last=__A,split_batches=__A )
self.check_iterable_dataset_shards(__A,__A,batch_size=4,drop_last=__A,split_batches=__A )
self.check_iterable_dataset_shards(__A,__A,batch_size=4,drop_last=__A,split_batches=__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[Any] = BatchSampler(range(1_6 ),batch_size=4,drop_last=__A )
_lowerCamelCase : Tuple = SkipBatchSampler(__A,2 )
self.assertListEqual(list(__A ),[[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : str = SkipDataLoader(list(range(1_6 ) ),batch_size=4,skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader],[[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Union[str, Any] = DataLoader(list(range(1_6 ) ),batch_size=4 )
_lowerCamelCase : List[str] = skip_first_batches(__A,num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader],[[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[int] = DataLoaderShard(list(range(1_6 ) ),batch_size=4 )
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader,idx == 3 )
def lowerCamelCase_ ( self : Dict ):
Accelerator()
_lowerCamelCase : List[Any] = DataLoaderDispatcher(range(1_6 ),batch_size=4 )
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader,idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__A ):
self.assertEqual(dataloader.end_of_dataloader,idx == 3 ) | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the ๐ค hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
import os
import string
import sys
UpperCAmelCase_ : List[Any] = 1 << 8
UpperCAmelCase_ : int = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
UpperCAmelCase_ : List[Any] = KEYMAP['up']
UpperCAmelCase_ : int = KEYMAP['left']
if sys.platform == "win32":
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[str] = {
b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
UpperCAmelCase_ : Dict = ord(str(i))
def A_ ( ):
"""simple docstring"""
if os.name == "nt":
import msvcrt
_lowerCamelCase : List[str] = "mbcs"
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_lowerCAmelCase ) == 0:
# Read the keystroke
_lowerCamelCase : Any = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : Tuple = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Optional[int] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"] ) )
WIN_CH_BUFFER.append(_lowerCAmelCase )
if ord(_lowerCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : str = chr(KEYMAP["esc"] )
except KeyError:
_lowerCamelCase : Any = cha[1]
else:
_lowerCamelCase : Dict = ch.decode(_lowerCAmelCase )
else:
_lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : Optional[Any] = sys.stdin.fileno()
_lowerCamelCase : Dict = termios.tcgetattr(_lowerCAmelCase )
try:
tty.setraw(_lowerCAmelCase )
_lowerCamelCase : List[str] = sys.stdin.read(1 )
finally:
termios.tcsetattr(_lowerCAmelCase , termios.TCSADRAIN , _lowerCAmelCase )
return ch
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = get_raw_chars()
if ord(_lowerCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_lowerCAmelCase ) == KEYMAP["esc"]:
_lowerCamelCase : str = get_raw_chars()
if ord(_lowerCAmelCase ) == KEYMAP["mod_int"]:
_lowerCamelCase : Dict = get_raw_chars()
if ord(_lowerCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_lowerCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_lowerCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : int,__A : list[int] ):
_lowerCamelCase : Any = len(__A )
_lowerCamelCase : List[Any] = [0] * len_array
if len_array > 0:
_lowerCamelCase : Tuple = array[0]
for i in range(1,__A ):
_lowerCamelCase : Any = self.prefix_sum[i - 1] + array[i]
def lowerCamelCase_ ( self : str,__A : int,__A : int ):
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCamelCase_ ( self : Union[str, Any],__A : int ):
_lowerCamelCase : List[Any] = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__A )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : List[Any] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
return "\n".join(
F'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10)) | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'codegen'
lowerCAmelCase_ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Any,__A : Optional[int]=5_0_4_0_0,__A : Any=2_0_4_8,__A : str=2_0_4_8,__A : int=4_0_9_6,__A : Any=2_8,__A : str=1_6,__A : Optional[Any]=6_4,__A : str=None,__A : Optional[int]="gelu_new",__A : Optional[int]=0.0,__A : List[Any]=0.0,__A : str=0.0,__A : Union[str, Any]=1e-5,__A : Tuple=0.02,__A : str=True,__A : Any=5_0_2_5_6,__A : Optional[Any]=5_0_2_5_6,__A : int=False,**__A : Optional[int],):
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Tuple = n_ctx
_lowerCamelCase : int = n_positions
_lowerCamelCase : int = n_embd
_lowerCamelCase : List[str] = n_layer
_lowerCamelCase : Union[str, Any] = n_head
_lowerCamelCase : List[Any] = n_inner
_lowerCamelCase : str = rotary_dim
_lowerCamelCase : Optional[int] = activation_function
_lowerCamelCase : Any = resid_pdrop
_lowerCamelCase : Dict = embd_pdrop
_lowerCamelCase : Any = attn_pdrop
_lowerCamelCase : Optional[int] = layer_norm_epsilon
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Optional[int] = use_cache
_lowerCamelCase : List[str] = bos_token_id
_lowerCamelCase : str = eos_token_id
super().__init__(
bos_token_id=__A,eos_token_id=__A,tie_word_embeddings=__A,**__A )
class UpperCAmelCase__ ( A ):
def __init__( self : Any,__A : PretrainedConfig,__A : str = "default",__A : List[PatchingSpec] = None,__A : bool = False,):
super().__init__(__A,task=__A,patching_specs=__A,use_past=__A )
if not getattr(self._config,"pad_token_id",__A ):
# TODO: how to do that better?
_lowerCamelCase : Any = 0
@property
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Union[str, Any] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__A,direction="inputs" )
_lowerCamelCase : Union[str, Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
_lowerCamelCase : int = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCamelCase_ ( self : List[str] ):
return self._config.n_layer
@property
def lowerCamelCase_ ( self : List[str] ):
return self._config.n_head
def lowerCamelCase_ ( self : Optional[Any],__A : PreTrainedTokenizer,__A : int = -1,__A : int = -1,__A : bool = False,__A : Optional[TensorType] = None,):
_lowerCamelCase : str = super(__A,self ).generate_dummy_inputs(
__A,batch_size=__A,seq_length=__A,is_pair=__A,framework=__A )
# We need to order the input in the way they appears in the forward()
_lowerCamelCase : Optional[int] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_lowerCamelCase : Optional[int] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_lowerCamelCase : List[Any] = seqlen + 2
_lowerCamelCase : str = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_lowerCamelCase : Dict = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(self.num_layers )
]
_lowerCamelCase : Tuple = common_inputs["attention_mask"]
if self.use_past:
_lowerCamelCase : Optional[int] = ordered_inputs["attention_mask"].dtype
_lowerCamelCase : Union[str, Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__A,__A,dtype=__A )],dim=1 )
return ordered_inputs
@property
def lowerCamelCase_ ( self : Optional[int] ):
return 1_3
| 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase_ : Any = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = (DDPMScheduler,)
def lowerCamelCase_ ( self : str,**__A : List[str] ):
_lowerCamelCase : List[str] = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__A )
return config
def lowerCamelCase_ ( self : str ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__A )
def lowerCamelCase_ ( self : Tuple ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1],[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__A,beta_end=__A )
def lowerCamelCase_ ( self : Any ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__A )
def lowerCamelCase_ ( self : Dict ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__A )
def lowerCamelCase_ ( self : int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def lowerCamelCase_ ( self : Tuple ):
self.check_over_configs(thresholding=__A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__A,prediction_type=__A,sample_max_value=__A,)
def lowerCamelCase_ ( self : List[str] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def lowerCamelCase_ ( self : int ):
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Union[str, Any] = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Optional[int] = scheduler_class(**__A )
_lowerCamelCase : List[Any] = len(__A )
_lowerCamelCase : Union[str, Any] = self.dummy_model()
_lowerCamelCase : Optional[int] = self.dummy_sample_deter
_lowerCamelCase : Tuple = torch.manual_seed(0 )
for t in reversed(range(__A ) ):
# 1. predict noise residual
_lowerCamelCase : int = model(__A,__A )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : int = scheduler.step(__A,__A,__A,generator=__A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : Dict = pred_prev_sample
_lowerCamelCase : Dict = torch.sum(torch.abs(__A ) )
_lowerCamelCase : Tuple = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(prediction_type="v_prediction" )
_lowerCamelCase : Optional[int] = scheduler_class(**__A )
_lowerCamelCase : List[str] = len(__A )
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : str = self.dummy_sample_deter
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(__A ) ):
# 1. predict noise residual
_lowerCamelCase : str = model(__A,__A )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[str] = scheduler.step(__A,__A,__A,generator=__A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase : str = pred_prev_sample
_lowerCamelCase : Tuple = torch.sum(torch.abs(__A ) )
_lowerCamelCase : Union[str, Any] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : Any = scheduler_class(**__A )
_lowerCamelCase : Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=__A )
_lowerCamelCase : List[str] = scheduler.timesteps
for i, timestep in enumerate(__A ):
if i == len(__A ) - 1:
_lowerCamelCase : Union[str, Any] = -1
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
_lowerCamelCase : Optional[int] = scheduler.previous_timestep(__A )
_lowerCamelCase : int = prev_t.item()
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : List[Any] = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**__A )
_lowerCamelCase : Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(__A,msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config()
_lowerCamelCase : List[str] = scheduler_class(**__A )
_lowerCamelCase : Tuple = [1_0_0, 8_7, 5_0, 1, 0]
_lowerCamelCase : Dict = len(__A )
with self.assertRaises(__A,msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__A,timesteps=__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Tuple = self.get_scheduler_config()
_lowerCamelCase : Optional[int] = scheduler_class(**__A )
_lowerCamelCase : int = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__A,msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",):
scheduler.set_timesteps(timesteps=__A )
| 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = input("Enter message: " )
_lowerCamelCase : List[str] = input("Enter key [alphanumeric]: " )
_lowerCamelCase : Optional[Any] = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
_lowerCamelCase : Any = "encrypt"
_lowerCamelCase : List[Any] = encrypt_message(_lowerCAmelCase , _lowerCAmelCase )
elif mode.lower().startswith("d" ):
_lowerCamelCase : List[Any] = "decrypt"
_lowerCamelCase : Any = decrypt_message(_lowerCAmelCase , _lowerCAmelCase )
print(F'\n{mode.title()}ed message:' )
print(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
return translate_message(_lowerCAmelCase , _lowerCAmelCase , "encrypt" )
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
return translate_message(_lowerCAmelCase , _lowerCAmelCase , "decrypt" )
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : List[Any] = []
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : str = key.upper()
for symbol in message:
_lowerCamelCase : int = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(_lowerCAmelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(_lowerCAmelCase ):
_lowerCamelCase : Dict = 0
else:
translated.append(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
if __name__ == "__main__":
main() | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = 'โ'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
_lowerCamelCase : Union[str, Any] = {" ", "โ", "โ", "โฏ", "โ
", "ใ", "โ", " ", "โ", "โ", "๏ฟผ", "ย"}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str = " " ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = 0
for index, char in enumerate(_lowerCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
_lowerCamelCase : List[Any] = index + 1
elif index + 1 == len(_lowerCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
UpperCAmelCase_ : Dict = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ : Tuple = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = list(s_dict.keys() )
for key in keys:
_lowerCamelCase : Union[str, Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_lowerCamelCase : Tuple = new_key.replace(_lowerCAmelCase , _lowerCAmelCase )
print(F'{key} -> {new_key}' )
_lowerCamelCase : str = s_dict.pop(_lowerCAmelCase )
return s_dict
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = emb.weight.shape
_lowerCamelCase : Optional[Any] = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
_lowerCamelCase : List[str] = emb.weight.data
return lin_layer
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
_lowerCamelCase : str = os.path.basename(_lowerCAmelCase )
_lowerCamelCase : int = url.split("/" )[-2]
_lowerCamelCase : Optional[int] = os.path.join(_lowerCAmelCase , _lowerCAmelCase )
if os.path.exists(_lowerCAmelCase ) and not os.path.isfile(_lowerCAmelCase ):
raise RuntimeError(F'{download_target} exists and is not a regular file' )
if os.path.isfile(_lowerCAmelCase ):
_lowerCamelCase : List[Any] = open(_lowerCAmelCase , "rb" ).read()
if hashlib.shaaaa(_lowerCAmelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
with urllib.request.urlopen(_lowerCAmelCase ) as source, open(_lowerCAmelCase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=_lowerCAmelCase , unit_divisor=1024 ) as loop:
while True:
_lowerCamelCase : List[Any] = source.read(8192 )
if not buffer:
break
output.write(_lowerCAmelCase )
loop.update(len(_lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = open(_lowerCAmelCase , "rb" ).read()
if hashlib.shaaaa(_lowerCAmelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ):
"""simple docstring"""
if ".pt" not in checkpoint_path:
_lowerCamelCase : Optional[int] = _download(_MODELS[checkpoint_path] )
else:
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
_lowerCamelCase : Union[str, Any] = original_checkpoint["dims"]
_lowerCamelCase : Optional[int] = original_checkpoint["model_state_dict"]
_lowerCamelCase : Any = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(_lowerCAmelCase )
rename_keys(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Union[str, Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
_lowerCamelCase : List[Any] = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=_lowerCAmelCase , decoder_ffn_dim=_lowerCAmelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
_lowerCamelCase : List[str] = WhisperForConditionalGeneration(_lowerCAmelCase )
_lowerCamelCase : Any = model.model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0 and not set(_lowerCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F' but all the following weights are missing {missing}' )
if tie_embeds:
_lowerCamelCase : Dict = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_lowerCamelCase : int = proj_out_weights
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : Any = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path) | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ) -> Any:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ) -> Dict:
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ) -> int:
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ) -> Union[str, Any]:
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = RobertaTokenizer
lowerCAmelCase_ = RobertaTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'cls_token': '<s>'}
def lowerCamelCase_ ( self : Tuple ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_lowerCamelCase : List[Any] = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Union[str, Any] = {"unk_token": "<unk>"}
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : int = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def lowerCamelCase_ ( self : Dict,**__A : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : str,**__A : Optional[int] ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname,**__A )
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Tuple = "lower newer"
_lowerCamelCase : Optional[int] = "lower newer"
return input_text, output_text
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = self.tokenizer_class(self.vocab_file,self.merges_file,**self.special_tokens_map )
_lowerCamelCase : int = "lower newer"
_lowerCamelCase : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
_lowerCamelCase : Dict = tokenizer.tokenize(__A ) # , add_prefix_space=True)
self.assertListEqual(__A,__A )
_lowerCamelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!",add_special_tokens=__A ),[0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cรฉcรฉ herlolip 418",add_special_tokens=__A ),[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2],)
@slow
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = self.tokenizer_class.from_pretrained("roberta-base" )
_lowerCamelCase : List[str] = tokenizer.encode("sequence builders",add_special_tokens=__A )
_lowerCamelCase : Tuple = tokenizer.encode("multi-sequence build",add_special_tokens=__A )
_lowerCamelCase : Optional[int] = tokenizer.encode(
"sequence builders",add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = tokenizer.encode(
"sequence builders","multi-sequence build",add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__A )
_lowerCamelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__A,__A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : int = "Encode this sequence."
_lowerCamelCase : Union[str, Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
_lowerCamelCase : Optional[Any] = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__A,__A )
_lowerCamelCase : List[Any] = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__A,__A )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
_lowerCamelCase : Optional[Any] = tokenizer.encode(__A,add_special_tokens=__A )
_lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__A,__A )
# Testing spaces after special tokens
_lowerCamelCase : str = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__A,lstrip=__A,rstrip=__A )} ) # mask token has a left space
_lowerCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(__A )
_lowerCamelCase : str = "Encode <mask> sequence"
_lowerCamelCase : Optional[int] = "Encode <mask>sequence"
_lowerCamelCase : int = tokenizer.encode(__A )
_lowerCamelCase : Any = encoded.index(__A )
_lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__A,__A )
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : Dict = encoded.index(__A )
_lowerCamelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__A,__A )
def lowerCamelCase_ ( self : int ):
pass
def lowerCamelCase_ ( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : int = self.tokenizer_class.from_pretrained(__A,**__A )
_lowerCamelCase : int = "A, <mask> AllenNLP sentence."
_lowerCamelCase : Tuple = tokenizer_r.encode_plus(__A,add_special_tokens=__A,return_token_type_ids=__A )
_lowerCamelCase : str = tokenizer_p.encode_plus(__A,add_special_tokens=__A,return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ),sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ),sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ),)
_lowerCamelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_lowerCamelCase : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"],[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"],[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
__A,["<s>", "A", ",", "<mask>", "ฤ Allen", "N", "LP", "ฤ sentence", ".", "</s>"] )
self.assertSequenceEqual(
__A,["<s>", "A", ",", "<mask>", "ฤ Allen", "N", "LP", "ฤ sentence", ".", "</s>"] )
def lowerCamelCase_ ( self : List[Any] ):
for trim_offsets, add_prefix_space in itertools.product([True, False],repeat=2 ):
_lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowerCamelCase : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"],__A )
self.assertEqual(post_processor_state["add_prefix_space"],__A )
self.assertEqual(post_processor_state["trim_offsets"],__A )
def lowerCamelCase_ ( self : List[Any] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : List[Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCamelCase : List[Any] = f'{text_of_1_token} {text_of_1_token}'
_lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : Optional[Any] = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(len(__A ) + 1, len(__A ) + 1 + len(__A )),)
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : int = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(len(__A ) + 1, len(__A ) + 1 + len(__A )),)
_lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : str = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(len(__A ), len(__A ) + 1 + len(__A )),)
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : List[str] = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(len(__A ), len(__A ) + 1 + len(__A )),)
_lowerCamelCase : Optional[Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : Optional[Any] = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(1, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )),)
_lowerCamelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : str = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(1 + len(__A ), 1 + len(__A ) + 1 + len(__A )),)
_lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
__A,use_fast=__A,add_prefix_space=__A,trim_offsets=__A )
_lowerCamelCase : Optional[Any] = tokenizer_r(__A,return_offsets_mapping=__A,add_special_tokens=__A )
self.assertEqual(encoding.offset_mapping[0],(0, 1 + len(__A )) )
self.assertEqual(
encoding.offset_mapping[1],(1 + len(__A ), 1 + len(__A ) + 1 + len(__A )),) | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self : List[Any] ):
torch.manual_seed(0 )
_lowerCamelCase : Any = UNetaDModel(
block_out_channels=(3_2, 6_4),layers_per_block=2,sample_size=3_2,in_channels=3,out_channels=3,down_block_types=("DownBlock2D", "AttnDownBlock2D"),up_block_types=("AttnUpBlock2D", "UpBlock2D"),)
return model
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = self.dummy_uncond_unet
_lowerCamelCase : List[str] = KarrasVeScheduler()
_lowerCamelCase : Union[str, Any] = KarrasVePipeline(unet=__A,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : str = torch.manual_seed(0 )
_lowerCamelCase : Optional[Any] = pipe(num_inference_steps=2,generator=__A,output_type="numpy" ).images
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : str = pipe(num_inference_steps=2,generator=__A,output_type="numpy",return_dict=__A )[0]
_lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowerCamelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : int = "google/ncsnpp-celebahq-256"
_lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = KarrasVeScheduler()
_lowerCamelCase : Dict = KarrasVePipeline(unet=__A,scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCamelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCamelCase : Any = pipe(num_inference_steps=2_0,generator=__A,output_type="numpy" ).images
_lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
_lowerCamelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase_ : str = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
print()
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
from math import loga
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("Input value must be a 'int' type" )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
UpperCAmelCase_ : Union[str, Any] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'maskformer'
lowerCAmelCase_ = {'hidden_size': 'mask_feature_size'}
lowerCAmelCase_ = ['resnet', 'swin']
lowerCAmelCase_ = ['detr']
def __init__( self : str,__A : int = 2_5_6,__A : int = 2_5_6,__A : float = 0.1,__A : bool = False,__A : Optional[Dict] = None,__A : Optional[Dict] = None,__A : float = 0.02,__A : float = 1.0,__A : float = 1.0,__A : float = 1.0,__A : float = 20.0,__A : Optional[bool] = None,**__A : List[str],):
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_lowerCamelCase : int = SwinConfig(
image_size=3_8_4,in_channels=3,patch_size=4,embed_dim=1_2_8,depths=[2, 2, 1_8, 2],num_heads=[4, 8, 1_6, 3_2],window_size=1_2,drop_path_rate=0.3,out_features=["stage1", "stage2", "stage3", "stage4"],)
if isinstance(__A,__A ):
_lowerCamelCase : Optional[Any] = backbone_config.pop("model_type" )
_lowerCamelCase : str = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Union[str, Any] = config_class.from_dict(__A )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_lowerCamelCase : Optional[Any] = DetrConfig()
else:
# verify that the decoder is supported
_lowerCamelCase : Dict = (
decoder_config.pop("model_type" ) if isinstance(__A,__A ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(__A,__A ):
_lowerCamelCase : str = CONFIG_MAPPING[decoder_type]
_lowerCamelCase : List[str] = config_class.from_dict(__A )
_lowerCamelCase : Tuple = backbone_config
_lowerCamelCase : str = decoder_config
# main feature dimension for the model
_lowerCamelCase : Dict = fpn_feature_size
_lowerCamelCase : Any = mask_feature_size
# initializer
_lowerCamelCase : Optional[int] = init_std
_lowerCamelCase : str = init_xavier_std
# Hungarian matcher && loss
_lowerCamelCase : Optional[int] = cross_entropy_weight
_lowerCamelCase : Union[str, Any] = dice_weight
_lowerCamelCase : Optional[Any] = mask_weight
_lowerCamelCase : Any = use_auxiliary_loss
_lowerCamelCase : List[Any] = no_object_weight
_lowerCamelCase : List[Any] = output_auxiliary_logits
_lowerCamelCase : int = self.decoder_config.encoder_attention_heads
_lowerCamelCase : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**__A )
@classmethod
def lowerCamelCase_ ( cls : Any,__A : PretrainedConfig,__A : PretrainedConfig,**__A : Optional[int] ):
return cls(
backbone_config=__A,decoder_config=__A,**__A,)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = copy.deepcopy(self.__dict__ )
_lowerCamelCase : Dict = self.backbone_config.to_dict()
_lowerCamelCase : Union[str, Any] = self.decoder_config.to_dict()
_lowerCamelCase : Tuple = self.__class__.model_type
return output | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : Any = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ['DeiTFeatureExtractor']
UpperCAmelCase_ : Any = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
if len(_lowerCAmelCase ) == 0:
return []
_lowerCamelCase : Union[str, Any] = min(_lowerCAmelCase ), max(_lowerCAmelCase )
_lowerCamelCase : int = int(max_value - min_value ) + 1
_lowerCamelCase : list[list] = [[] for _ in range(_lowerCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(_lowerCAmelCase )
return [v for bucket in buckets for v in sorted(_lowerCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15] | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not nums:
return 0
_lowerCamelCase : List[Any] = nums[0]
_lowerCamelCase : Tuple = 0
for num in nums[1:]:
_lowerCamelCase : Optional[int] = (
max_excluding + num,
max(_lowerCAmelCase , _lowerCAmelCase ),
)
return max(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
UpperCAmelCase_ : List[Any] = [
# tf -> hf
('/', '.'),
('layer_', 'layers.'),
('kernel', 'weight'),
('beta', 'bias'),
('gamma', 'weight'),
('pegasus', 'model'),
]
UpperCAmelCase_ : Union[str, Any] = [
('.output.dense', '.fc2'),
('intermediate.LayerNorm', 'final_layer_norm'),
('intermediate.dense', 'fc1'),
]
UpperCAmelCase_ : List[Any] = (
INIT_COMMON
+ [
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.out_proj'),
('attention.self', 'self_attn'),
('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
('attention.encdec_output.dense', 'encoder_attn.out_proj'),
('attention.encdec', 'encoder_attn'),
('key', 'k_proj'),
('value', 'v_proj'),
('query', 'q_proj'),
('decoder.LayerNorm', 'decoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase_ : Optional[int] = (
INIT_COMMON
+ [
('embeddings.word_embeddings', 'shared.weight'),
('embeddings.position_embeddings', 'embed_positions.weight'),
('attention.self.LayerNorm', 'self_attn_layer_norm'),
('attention.output.dense', 'self_attn.output'),
('attention.self', 'self_attn.self'),
('encoder.LayerNorm', 'encoder.layernorm_embedding'),
]
+ END_COMMON
)
UpperCAmelCase_ : Tuple = [
'encdec/key/bias',
'encdec/query/bias',
'encdec/value/bias',
'self/key/bias',
'self/query/bias',
'self/value/bias',
'encdec_output/dense/bias',
'attention/output/dense/bias',
]
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
for tf_name, hf_name in patterns:
_lowerCamelCase : int = k.replace(_lowerCAmelCase , _lowerCAmelCase )
return k
def A_ ( _lowerCAmelCase : dict , _lowerCAmelCase : dict ):
"""simple docstring"""
_lowerCamelCase : Dict = BigBirdPegasusConfig(**_lowerCAmelCase )
_lowerCamelCase : Tuple = BigBirdPegasusForConditionalGeneration(_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = torch_model.state_dict()
_lowerCamelCase : Optional[int] = {}
# separating decoder weights
_lowerCamelCase : Tuple = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder" )}
_lowerCamelCase : int = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder" )}
for k, v in tqdm(decoder_weights.items() , "tf -> hf conversion" ):
_lowerCamelCase : Any = [k.endswith(_lowerCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(_lowerCAmelCase ):
continue
_lowerCamelCase : Any = DECODER_PATTERNS
_lowerCamelCase : List[str] = rename_state_dict_key(_lowerCAmelCase , _lowerCAmelCase )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
_lowerCamelCase : Union[str, Any] = v.T
_lowerCamelCase : List[Any] = torch.from_numpy(_lowerCAmelCase )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , "tf -> hf conversion" ):
_lowerCamelCase : Optional[int] = [k.endswith(_lowerCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(_lowerCAmelCase ):
continue
_lowerCamelCase : str = REMAINING_PATTERNS
_lowerCamelCase : str = rename_state_dict_key(_lowerCAmelCase , _lowerCAmelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ["dense", "query", "key", "value"] ):
_lowerCamelCase : int = v.T
_lowerCamelCase : Optional[Any] = torch.from_numpy(_lowerCAmelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
_lowerCamelCase : Dict = mapping["model.embed_positions.weight"]
_lowerCamelCase : Union[str, Any] = mapping.pop("model.embed_positions.weight" )
_lowerCamelCase : List[str] = torch_model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
_lowerCamelCase : Optional[Any] = [
k
for k in missing
if k
not in [
"final_logits_bias",
"model.encoder.embed_tokens.weight",
"model.decoder.embed_tokens.weight",
"lm_head.weight",
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = tf.train.list_variables(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : List[Any] = ["global_step"]
for name, shape in tqdm(_lowerCAmelCase , desc="converting tf checkpoint to dict" ):
_lowerCamelCase : List[Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_lowerCamelCase : Any = tf.train.load_variable(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : List[Any] = array
return tf_weights
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : dict ):
"""simple docstring"""
_lowerCamelCase : str = get_tf_weights_as_numpy(_lowerCAmelCase )
_lowerCamelCase : Tuple = convert_bigbird_pegasus(_lowerCAmelCase , _lowerCAmelCase )
torch_model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : str = parser.parse_args()
UpperCAmelCase_ : Dict = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the ๐ค hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any],__A : int,__A : int ):
_lowerCamelCase : Dict = jnp.ones((batch_size, length) ) / length
return scores
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = None
_lowerCamelCase : Any = 2_0
_lowerCamelCase : Optional[int] = self._get_uniform_logits(batch_size=2,length=__A )
# tweak scores to not be uniform anymore
_lowerCamelCase : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_lowerCamelCase : str = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_lowerCamelCase : Union[str, Any] = jax.nn.softmax(__A,axis=-1 )
_lowerCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase : Dict = FlaxTemperatureLogitsWarper(temperature=1.3 )
_lowerCamelCase : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(__A,scores.copy(),cur_len=__A ),axis=-1 )
_lowerCamelCase : int = jax.nn.softmax(temp_dist_warper_smoother(__A,scores.copy(),cur_len=__A ),axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :],warped_prob_sharp[0, :],atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :],warped_prob_smooth[0, :],atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(),warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min(),warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(),warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min(),warped_prob_smooth[1, :].min() )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Dict = None
_lowerCamelCase : Union[str, Any] = 1_0
_lowerCamelCase : List[Any] = 2
# create ramp distribution
_lowerCamelCase : int = np.broadcast_to(np.arange(__A )[None, :],(batch_size, vocab_size) ).copy()
_lowerCamelCase : str = ramp_logits[1:, : vocab_size // 2] + vocab_size
_lowerCamelCase : List[str] = FlaxTopKLogitsWarper(3 )
_lowerCamelCase : Dict = top_k_warp(__A,__A,cur_len=__A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist(),7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist(),2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_lowerCamelCase : List[Any] = 5
_lowerCamelCase : List[str] = FlaxTopKLogitsWarper(top_k=1,filter_value=0.0,min_tokens_to_keep=3 )
_lowerCamelCase : str = np.broadcast_to(np.arange(__A )[None, :],(batch_size, length) ).copy()
_lowerCamelCase : str = top_k_warp_safety_check(__A,__A,cur_len=__A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist(),[2, 2] )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[Any] = None
_lowerCamelCase : int = 1_0
_lowerCamelCase : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_lowerCamelCase : List[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_lowerCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
_lowerCamelCase : List[Any] = np.exp(top_p_warp(__A,__A,cur_len=__A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_lowerCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
# check edge cases with negative and extreme logits
_lowerCamelCase : int = np.broadcast_to(np.arange(__A )[None, :],(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_lowerCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_lowerCamelCase : Tuple = FlaxTopPLogitsWarper(0.9,min_tokens_to_keep=2,filter_value=0.0 )
_lowerCamelCase : Optional[Any] = top_p_warp(__A,__A,cur_len=__A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist(),[3, 2] )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Union[str, Any] = 2_0
_lowerCamelCase : Optional[Any] = 4
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Any = FlaxMinLengthLogitsProcessor(min_length=1_0,eos_token_id=__A )
# check that min length is applied at length 5
_lowerCamelCase : Dict = ids_tensor((batch_size, 2_0),vocab_size=2_0 )
_lowerCamelCase : int = 5
_lowerCamelCase : List[Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Dict = min_dist_processor(__A,__A,cur_len=__A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(),4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
_lowerCamelCase : List[str] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Any = 1_5
_lowerCamelCase : List[str] = min_dist_processor(__A,__A,cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = 2_0
_lowerCamelCase : Optional[int] = 4
_lowerCamelCase : Any = 0
_lowerCamelCase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
# check that all scores are -inf except the bos_token_id score
_lowerCamelCase : str = ids_tensor((batch_size, 1),vocab_size=2_0 )
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Union[str, Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Dict = logits_processor(__A,__A,cur_len=__A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist(),4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_lowerCamelCase : Union[str, Any] = 3
_lowerCamelCase : Optional[Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Dict = logits_processor(__A,__A,cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : str = 2_0
_lowerCamelCase : List[str] = 4
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Any = 5
_lowerCamelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__A,eos_token_id=__A )
# check that all scores are -inf except the eos_token_id when max_length is reached
_lowerCamelCase : Optional[Any] = ids_tensor((batch_size, 4),vocab_size=2_0 )
_lowerCamelCase : Any = 4
_lowerCamelCase : Optional[Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Tuple = logits_processor(__A,__A,cur_len=__A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist(),4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Any = logits_processor(__A,__A,cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[str] = 4
_lowerCamelCase : Any = 1_0
_lowerCamelCase : int = 1_5
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Tuple = 1
_lowerCamelCase : str = 1_5
# dummy input_ids and scores
_lowerCamelCase : Tuple = ids_tensor((batch_size, sequence_length),__A )
_lowerCamelCase : Tuple = input_ids.copy()
_lowerCamelCase : Dict = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Optional[Any] = scores.copy()
# instantiate all dist processors
_lowerCamelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
_lowerCamelCase : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0,eos_token_id=__A )
_lowerCamelCase : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
_lowerCamelCase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=__A,eos_token_id=__A )
_lowerCamelCase : Tuple = 1_0
# no processor list
_lowerCamelCase : str = temp_dist_warp(__A,__A,cur_len=__A )
_lowerCamelCase : List[Any] = top_k_warp(__A,__A,cur_len=__A )
_lowerCamelCase : Any = top_p_warp(__A,__A,cur_len=__A )
_lowerCamelCase : Tuple = min_dist_proc(__A,__A,cur_len=__A )
_lowerCamelCase : Optional[int] = bos_dist_proc(__A,__A,cur_len=__A )
_lowerCamelCase : Union[str, Any] = eos_dist_proc(__A,__A,cur_len=__A )
# with processor list
_lowerCamelCase : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase : List[Any] = processor(__A,__A,cur_len=__A )
# scores should be equal
self.assertTrue(jnp.allclose(__A,__A,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(),input_ids_comp.tolist() )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Dict = 4
_lowerCamelCase : List[str] = 1_0
_lowerCamelCase : Any = 1_5
_lowerCamelCase : Dict = 2
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : Any = 1_5
# dummy input_ids and scores
_lowerCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length),__A )
_lowerCamelCase : Tuple = input_ids.copy()
_lowerCamelCase : Optional[Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Optional[Any] = scores.copy()
# instantiate all dist processors
_lowerCamelCase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase : List[Any] = FlaxTopKLogitsWarper(3 )
_lowerCamelCase : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0,eos_token_id=__A )
_lowerCamelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
_lowerCamelCase : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=__A,eos_token_id=__A )
_lowerCamelCase : List[str] = 1_0
# no processor list
def run_no_processor_list(__A : Tuple,__A : str,__A : List[Any] ):
_lowerCamelCase : str = temp_dist_warp(__A,__A,cur_len=__A )
_lowerCamelCase : List[str] = top_k_warp(__A,__A,cur_len=__A )
_lowerCamelCase : Any = top_p_warp(__A,__A,cur_len=__A )
_lowerCamelCase : Dict = min_dist_proc(__A,__A,cur_len=__A )
_lowerCamelCase : Dict = bos_dist_proc(__A,__A,cur_len=__A )
_lowerCamelCase : int = eos_dist_proc(__A,__A,cur_len=__A )
return scores
# with processor list
def run_processor_list(__A : str,__A : int,__A : int ):
_lowerCamelCase : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase : List[str] = processor(__A,__A,cur_len=__A )
return scores
_lowerCamelCase : str = jax.jit(__A )
_lowerCamelCase : Optional[Any] = jax.jit(__A )
_lowerCamelCase : Optional[Any] = jitted_run_no_processor_list(__A,__A,__A )
_lowerCamelCase : Any = jitted_run_processor_list(__A,__A,__A )
# scores should be equal
self.assertTrue(jnp.allclose(__A,__A,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(),input_ids_comp.tolist() )
| 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A_ ( _lowerCAmelCase : str = "laptop" ):
"""simple docstring"""
_lowerCamelCase : Dict = F'https://www.amazon.in/laptop/s?k={product}'
_lowerCamelCase : Tuple = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCamelCase : int = BeautifulSoup(requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
_lowerCamelCase : Dict = item.ha.text
_lowerCamelCase : Optional[int] = "https://www.amazon.in/" + item.ha.a["href"]
_lowerCamelCase : Optional[int] = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
_lowerCamelCase : Union[str, Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
_lowerCamelCase : Optional[Any] = "Not available"
try:
_lowerCamelCase : Dict = (
"โน"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("โน" )[1]
)
except AttributeError:
_lowerCamelCase : Union[str, Any] = ""
try:
_lowerCamelCase : Union[str, Any] = float(
(
(
float(product_mrp.strip("โน" ).replace("," , "" ) )
- float(product_price.strip("โน" ).replace("," , "" ) )
)
/ float(product_mrp.strip("โน" ).replace("," , "" ) )
)
* 100 )
except ValueError:
_lowerCamelCase : List[str] = float("nan" )
except AttributeError:
pass
_lowerCamelCase : Dict = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase : List[str] = " "
_lowerCamelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCAmelCase_ : str = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''') | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class UpperCAmelCase__ ( datasets.BeamBasedBuilder ):
def lowerCamelCase_ ( self : str ):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ),supervised_keys=__A,)
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Dict ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN,gen_kwargs={"examples": get_test_dummy_examples()} )]
def lowerCamelCase_ ( self : Any,__A : Any,__A : Optional[Any] ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__A )
class UpperCAmelCase__ ( datasets.BeamBasedBuilder ):
def lowerCamelCase_ ( self : str ):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ),supervised_keys=__A,)
def lowerCamelCase_ ( self : Optional[Any],__A : int,__A : str ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN,gen_kwargs={"examples": get_test_nested_examples()} )
]
def lowerCamelCase_ ( self : Tuple,__A : str,__A : Optional[Any] ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__A )
def A_ ( ):
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def A_ ( ):
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class UpperCAmelCase__ ( A ):
@require_beam
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_lowerCamelCase : Optional[Any] = DummyBeamDataset(cache_dir=__A,beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__A,builder.name,"default","0.0.0",f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features,datasets.Features({"content": datasets.Value("string" )} ) )
_lowerCamelCase : Optional[Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows,__A )
self.assertEqual(dset["train"].info.splits["train"].num_examples,__A )
self.assertDictEqual(dset["train"][0],get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1],get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__A,builder.name,"default","0.0.0","dataset_info.json" ) ) )
del dset
@require_beam
def lowerCamelCase_ ( self : Union[str, Any] ):
import apache_beam as beam
_lowerCamelCase : str = beam.io.parquetio.WriteToParquet
_lowerCamelCase : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_lowerCamelCase : Union[str, Any] = DummyBeamDataset(cache_dir=__A,beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
_lowerCamelCase : List[str] = partial(__A,num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__A,builder.name,"default","0.0.0",f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
__A,builder.name,"default","0.0.0",f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features,datasets.Features({"content": datasets.Value("string" )} ) )
_lowerCamelCase : str = builder.as_dataset()
self.assertEqual(dset["train"].num_rows,__A )
self.assertEqual(dset["train"].info.splits["train"].num_examples,__A )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ),sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(__A,builder.name,"default","0.0.0","dataset_info.json" ) ) )
del dset
@require_beam
def lowerCamelCase_ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_lowerCamelCase : Dict = DummyBeamDataset(cache_dir=__A )
self.assertRaises(datasets.builder.MissingBeamOptions,builder.download_and_prepare )
@require_beam
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[str] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_lowerCamelCase : str = NestedBeamDataset(cache_dir=__A,beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__A,builder.name,"default","0.0.0",f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features,datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
_lowerCamelCase : Optional[Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows,__A )
self.assertEqual(dset["train"].info.splits["train"].num_examples,__A )
self.assertDictEqual(dset["train"][0],get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1],get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__A,builder.name,"default","0.0.0","dataset_info.json" ) ) )
del dset | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
while b:
_lowerCamelCase : List[str] = b, a % b
return a
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(_lowerCAmelCase , a % b )
def A_ ( ):
"""simple docstring"""
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase_ : Any = logging.get_logger(__name__)
@add_end_docstrings(
A , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class UpperCAmelCase__ ( A ):
def lowerCamelCase_ ( self : Dict,__A : GenericTensor ):
if self.framework == "tf":
_lowerCamelCase : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
_lowerCamelCase : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id,as_tuple=__A )
else:
raise ValueError("Unsupported framework" )
return masked_index
def lowerCamelCase_ ( self : Dict,__A : GenericTensor ):
_lowerCamelCase : List[Any] = self.get_masked_index(__A )
_lowerCamelCase : List[Any] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask",self.model.base_model_prefix,f'No mask_token ({self.tokenizer.mask_token}) found on the input',)
def lowerCamelCase_ ( self : Optional[Any],__A : GenericTensor ):
if isinstance(__A,__A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__A )
def lowerCamelCase_ ( self : List[str],__A : Tuple,__A : List[Any]=None,**__A : str ):
if return_tensors is None:
_lowerCamelCase : Optional[int] = self.framework
_lowerCamelCase : List[Any] = self.tokenizer(__A,return_tensors=__A )
self.ensure_exactly_one_mask_token(__A )
return model_inputs
def lowerCamelCase_ ( self : List[Any],__A : Union[str, Any] ):
_lowerCamelCase : Any = self.model(**__A )
_lowerCamelCase : Optional[Any] = model_inputs["input_ids"]
return model_outputs
def lowerCamelCase_ ( self : List[str],__A : int,__A : Optional[Any]=5,__A : int=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
_lowerCamelCase : str = target_ids.shape[0]
_lowerCamelCase : int = model_outputs["input_ids"][0]
_lowerCamelCase : int = model_outputs["logits"]
if self.framework == "tf":
_lowerCamelCase : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
_lowerCamelCase : List[Any] = outputs.numpy()
_lowerCamelCase : List[str] = outputs[0, masked_index, :]
_lowerCamelCase : Dict = stable_softmax(__A,axis=-1 )
if target_ids is not None:
_lowerCamelCase : Dict = tf.gather_nd(tf.squeeze(__A,0 ),target_ids.reshape(-1,1 ) )
_lowerCamelCase : int = tf.expand_dims(__A,0 )
_lowerCamelCase : Tuple = tf.math.top_k(__A,k=__A )
_lowerCamelCase : Union[str, Any] = topk.values.numpy(), topk.indices.numpy()
else:
_lowerCamelCase : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id,as_tuple=__A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
_lowerCamelCase : List[str] = outputs[0, masked_index, :]
_lowerCamelCase : Optional[Any] = logits.softmax(dim=-1 )
if target_ids is not None:
_lowerCamelCase : List[Any] = probs[..., target_ids]
_lowerCamelCase : Optional[int] = probs.topk(__A )
_lowerCamelCase : int = []
_lowerCamelCase : Tuple = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist(),predictions.tolist() ) ):
_lowerCamelCase : List[Any] = []
for v, p in zip(_values,_predictions ):
# Copy is important since we're going to modify this array in place
_lowerCamelCase : Optional[Any] = input_ids.numpy().copy()
if target_ids is not None:
_lowerCamelCase : List[str] = target_ids[p].tolist()
_lowerCamelCase : Optional[int] = p
# Filter padding out:
_lowerCamelCase : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
_lowerCamelCase : str = self.tokenizer.decode(__A,skip_special_tokens=__A )
_lowerCamelCase : Any = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__A )
result.append(__A )
if single_mask:
return result[0]
return result
def lowerCamelCase_ ( self : Optional[Any],__A : Tuple,__A : Optional[Any]=None ):
if isinstance(__A,__A ):
_lowerCamelCase : Tuple = [targets]
try:
_lowerCamelCase : Dict = self.tokenizer.get_vocab()
except Exception:
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : List[str] = []
for target in targets:
_lowerCamelCase : Dict = vocab.get(__A,__A )
if id_ is None:
_lowerCamelCase : int = self.tokenizer(
__A,add_special_tokens=__A,return_attention_mask=__A,return_token_type_ids=__A,max_length=1,truncation=__A,)["input_ids"]
if len(__A ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"We cannot replace it with anything meaningful, ignoring it" )
continue
_lowerCamelCase : List[str] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
_lowerCamelCase : Any = list(set(__A ) )
if len(__A ) == 0:
raise ValueError("At least one target must be provided when passed." )
_lowerCamelCase : List[Any] = np.array(__A )
return target_ids
def lowerCamelCase_ ( self : str,__A : Any=None,__A : int=None ):
_lowerCamelCase : List[str] = {}
if targets is not None:
_lowerCamelCase : Optional[int] = self.get_target_ids(__A,__A )
_lowerCamelCase : Union[str, Any] = target_ids
if top_k is not None:
_lowerCamelCase : Optional[int] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask",self.model.base_model_prefix,"The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self : int,__A : List[str],*__A : Dict,**__A : List[str] ):
_lowerCamelCase : List[str] = super().__call__(__A,**__A )
if isinstance(__A,__A ) and len(__A ) == 1:
return outputs[0]
return outputs | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A_ ( _lowerCAmelCase : int = 8 ):
"""simple docstring"""
_lowerCamelCase : str = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
i -= len(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = i // 3
_lowerCamelCase : str = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCamelCase : Optional[Any] = (
chars_incl
+ random(_lowerCAmelCase , quotient + remainder )
+ random(_lowerCAmelCase , _lowerCAmelCase )
+ random(_lowerCAmelCase , _lowerCAmelCase )
)
_lowerCamelCase : Optional[Any] = list(_lowerCAmelCase )
shuffle(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
# random is a generalised function for letters, characters and numbers
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
return "".join(secrets.choice(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple ):
"""simple docstring"""
pass # Put your code here...
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
pass # Put your code here...
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
"""simple docstring"""
pass # Put your code here...
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int = 8 ):
"""simple docstring"""
if len(_lowerCAmelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCamelCase : List[Any] = any(char in ascii_uppercase for char in password )
_lowerCamelCase : str = any(char in ascii_lowercase for char in password )
_lowerCamelCase : Any = any(char in digits for char in password )
_lowerCamelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCamelCase : Any = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCAmelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCAmelCase , _lowerCAmelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCAmelCase_ : List[Any] = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
UpperCAmelCase_ : Dict = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
UpperCAmelCase_ : Tuple = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def lowerCamelCase_ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ),homepage="https://github.com/hendrycks/math",codebase_urls=["https://github.com/hendrycks/math"],)
def lowerCamelCase_ ( self : str,__A : List[Any],__A : List[str] ):
_lowerCamelCase : int = 0.0
for i, j in zip(__A,__A ):
n_correct += 1.0 if math_equivalence.is_equiv(__A,__A ) else 0.0
_lowerCamelCase : Tuple = n_correct / len(__A )
return {
"accuracy": accuracy,
} | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = 'โ'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCAmelCase_ : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
UpperCAmelCase_ : List[str] = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir,"models/bert/" ) )
_lowerCamelCase : Any = self.transformer_dir
shutil.copy(
os.path.join(__A,"src/transformers/models/bert/modeling_bert.py" ),os.path.join(self.transformer_dir,"models/bert/modeling_bert.py" ),)
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = "src/transformers"
shutil.rmtree(self.transformer_dir )
def lowerCamelCase_ ( self : Any,__A : Dict,__A : List[Any],__A : Union[str, Any],__A : Any=None ):
_lowerCamelCase : List[Any] = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
_lowerCamelCase : Union[str, Any] = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
_lowerCamelCase : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa},line_length=1_1_9 )
_lowerCamelCase : Union[str, Any] = black.format_str(__A,mode=__A )
_lowerCamelCase : Dict = os.path.join(self.transformer_dir,"new_code.py" )
with open(__A,"w",newline="\n" ) as f:
f.write(__A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name,overwrite=__A )
with open(__A,"r" ) as f:
self.assertTrue(f.read(),__A )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[str] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Dict ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead","BertLMPredictionHead",REFERENCE_CODE + "\n",)
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead","BertLMPredictionHead",__A,)
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel","TestModelLMPredictionHead",re.sub("Bert","TestModel",__A ),)
# Copy consistency with a really long name
_lowerCamelCase : Tuple = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}',f'{long_class_name}LMPredictionHead',re.sub("Bert",__A,__A ),)
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel","TestModelLMPredictionHead",__A,overwrite_result=re.sub("Bert","TestModel",__A ),)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Any = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
_lowerCamelCase : int = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
_lowerCamelCase : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (ๆฅ่ช Google Research and the"
" Toyota Technological Institute at Chicago) ไผด้่ฎบๆ [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), ็ฑ Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut ๅๅธใ\n"
)
_lowerCamelCase : List[str] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (ๆฅ่ช Google Research and the"
" Toyota Technological Institute at Chicago) ไผด้่ฎบๆ [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), ็ฑ Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut ๅๅธใ\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (ๆฅ่ช HuggingFace) ไผด้่ฎบๆ"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) ็ฑ Victor Sanh, Lysandre Debut and Thomas Wolf ๅๅธใ The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (ๆฅ่ช"
" Google Research/Stanford University) ไผด้่ฎบๆ [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) ็ฑ Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning ๅๅธใ\n"
)
_lowerCamelCase : Any = check_copies.convert_to_localized_md(
__A,__A,localized_readme["format_model_list"] )
self.assertFalse(__A )
self.assertEqual(__A,__A )
_lowerCamelCase : Tuple = check_copies.convert_to_localized_md(
__A,__A,localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__A )
_lowerCamelCase : Tuple = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
_lowerCamelCase : Tuple = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (ๆฅ่ช Google Research and"
" the Toyota Technological Institute at Chicago) ไผด้่ฎบๆ [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), ็ฑ Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut ๅๅธใ\n"
)
_lowerCamelCase : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (ๆฅ่ช Google Research and the"
" Toyota Technological Institute at Chicago) ไผด้่ฎบๆ [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), ็ฑ Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut ๅๅธใ\n"
)
_lowerCamelCase : List[Any] = check_copies.convert_to_localized_md(
__A,__A,localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(__A,__A ) | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
_lowerCamelCase : Union[str, Any] = {" ", "โ", "โ", "โฏ", "โ
", "ใ", "โ", " ", "โ", "โ", "๏ฟผ", "ย"}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
lowerCAmelCase_ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = self.task_name.lower()
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
lowerCAmelCase_ = 'test'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Any,__A : GlueDataTrainingArguments,__A : PreTrainedTokenizerBase,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[str] = None,):
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the ๐ค Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",__A,)
_lowerCamelCase : int = args
_lowerCamelCase : int = glue_processors[args.task_name]()
_lowerCamelCase : int = glue_output_modes[args.task_name]
if isinstance(__A,__A ):
try:
_lowerCamelCase : Optional[Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_lowerCamelCase : Optional[int] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}',)
_lowerCamelCase : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowerCamelCase : Dict = label_list[2], label_list[1]
_lowerCamelCase : Dict = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Dict = torch.load(__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
else:
logger.info(f'Creating features from dataset file at {args.data_dir}' )
if mode == Split.dev:
_lowerCamelCase : Any = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_lowerCamelCase : Optional[int] = self.processor.get_test_examples(args.data_dir )
else:
_lowerCamelCase : int = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_lowerCamelCase : Union[str, Any] = examples[:limit_length]
_lowerCamelCase : List[str] = glue_convert_examples_to_features(
__A,__A,max_length=args.max_seq_length,label_list=__A,output_mode=self.output_mode,)
_lowerCamelCase : int = time.time()
torch.save(self.features,__A )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : str ):
return len(self.features )
def __getitem__( self : str,__A : str ):
return self.features[i]
def lowerCamelCase_ ( self : Dict ):
return self.label_list
| 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = credit_card_number
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Optional[Any] = len(_lowerCAmelCase ) - 2
for i in range(_lowerCAmelCase , -1 , -2 ):
# double the value of every second digit
_lowerCamelCase : str = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 ร 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
_lowerCamelCase : Optional[int] = cc_number[:i] + str(_lowerCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_lowerCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Any = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_lowerCAmelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_lowerCAmelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_lowerCAmelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323') | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ) -> List[str]:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
UpperCAmelCase_ : Union[str, Any] = int(input('Enter number: ').strip())
print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''') | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCAmelCase_ : str = threading.Lock()
UpperCAmelCase_ : Optional[logging.Handler] = None
UpperCAmelCase_ : List[Any] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase_ : str = logging.WARNING
UpperCAmelCase_ : Dict = True
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = os.getenv("TRANSFORMERS_VERBOSITY" , _lowerCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
F'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def A_ ( ):
"""simple docstring"""
return __name__.split("." )[0]
def A_ ( ):
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def A_ ( ):
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_lowerCamelCase : Optional[int] = logging.StreamHandler() # Set sys.stderr as stream.
_lowerCamelCase : Optional[Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
_lowerCamelCase : List[str] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_lowerCamelCase : Tuple = False
def A_ ( ):
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_lowerCamelCase : str = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_lowerCamelCase : int = None
def A_ ( ):
"""simple docstring"""
return log_levels
def A_ ( _lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
if name is None:
_lowerCamelCase : Optional[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
return set_verbosity(_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
return set_verbosity(_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
return set_verbosity(_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
return set_verbosity(_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def A_ ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def A_ ( _lowerCAmelCase : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : logging.Handler ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_configure_library_root_logger()
_lowerCamelCase : Union[str, Any] = False
def A_ ( ):
"""simple docstring"""
_configure_library_root_logger()
_lowerCamelCase : List[Any] = True
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = _get_library_root_logger().handlers
for handler in handlers:
_lowerCamelCase : Dict = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(_lowerCAmelCase )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(_lowerCAmelCase )
def A_ ( self : List[str] , *_lowerCAmelCase : int , **_lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , _lowerCAmelCase )
if no_advisory_warnings:
return
self.warning(*_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase_ : Any = warning_advice
@functools.lru_cache(_lowerCAmelCase )
def A_ ( self : Dict , *_lowerCAmelCase : Any , **_lowerCAmelCase : str ):
"""simple docstring"""
self.warning(*_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase_ : int = warning_once
class UpperCAmelCase__ :
def __init__( self : Optional[Any],*__A : List[str],**__A : Union[str, Any] ): # pylint: disable=unused-argument
_lowerCamelCase : Optional[Any] = args[0] if args else None
def __iter__( self : str ):
return iter(self._iterator )
def __getattr__( self : List[Any],__A : Union[str, Any] ):
def empty_fn(*__A : Optional[Any],**__A : Tuple ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Any ):
return self
def __exit__( self : Any,__A : List[Any],__A : Optional[int],__A : int ):
return
class UpperCAmelCase__ :
def __call__( self : Optional[Any],*__A : Optional[Any],**__A : Union[str, Any] ):
if _tqdm_active:
return tqdm_lib.tqdm(*__A,**__A )
else:
return EmptyTqdm(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Tuple,**__A : Optional[int] ):
_lowerCamelCase : Any = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__A,**__A )
def lowerCamelCase_ ( self : Optional[Any] ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase_ : str = _tqdm_cls()
def A_ ( ):
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def A_ ( ):
"""simple docstring"""
global _tqdm_active
_lowerCamelCase : Union[str, Any] = True
hf_hub_utils.enable_progress_bars()
def A_ ( ):
"""simple docstring"""
global _tqdm_active
_lowerCamelCase : List[Any] = False
hf_hub_utils.disable_progress_bars() | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 0 |
'''simple docstring'''
import random
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : bool = False ):
"""simple docstring"""
_lowerCamelCase : dict = {i: [] for i in range(_lowerCAmelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_lowerCAmelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_lowerCAmelCase ):
for j in range(i + 1 , _lowerCAmelCase ):
if random.random() < probability:
graph[i].append(_lowerCAmelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_lowerCAmelCase )
return graph
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
return {
i: [j for j in range(_lowerCAmelCase ) if i != j] for i in range(_lowerCAmelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod() | 700 |
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase_ : Optional[Any] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
UpperCAmelCase_ : List[Any] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
UpperCAmelCase_ : int = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
def lowerCamelCase_ ( self : Optional[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(self._get_feature_types() ),reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
],)
def lowerCamelCase_ ( self : Dict ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def lowerCamelCase_ ( self : Optional[Any],__A : Tuple,__A : Any,__A : Optional[int]=None,__A : List[str]="uniform_average",__A : List[Any]=True ):
_lowerCamelCase : int = mean_squared_error(
__A,__A,sample_weight=__A,multioutput=__A,squared=__A )
return {"mse": mse} | 701 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Union[str, Any],__A : Union[str, Any]=1_3,__A : Union[str, Any]=7,__A : Tuple=False,__A : int=True,__A : int=False,__A : List[str]=False,__A : int=1_9,__A : Optional[Any]=3_2,__A : int=5,__A : str=4,__A : Optional[int]=3_7,__A : Dict="gelu",__A : Union[str, Any]=0.1,__A : List[str]=0.1,__A : Optional[Any]=5_1_2,__A : Optional[Any]=1_6,__A : Union[str, Any]=2,__A : Union[str, Any]=0.02,__A : str=3,__A : Optional[Any]=4,__A : str=None,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Dict = batch_size
_lowerCamelCase : str = seq_length
_lowerCamelCase : int = is_training
_lowerCamelCase : Tuple = use_input_mask
_lowerCamelCase : List[Any] = use_token_type_ids
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : List[str] = initializer_range
_lowerCamelCase : Optional[int] = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : Dict = scope
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
_lowerCamelCase : Any = None
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[Any] = EsmConfig(
vocab_size=3_3,hidden_size=self.hidden_size,pad_token_id=1,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,is_folding_model=__A,esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},)
return config
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : int,__A : List[Any],__A : Tuple,__A : int,__A : Union[str, Any] ):
_lowerCamelCase : int = EsmForProteinFolding(config=__A ).float()
model.to(__A )
model.eval()
_lowerCamelCase : int = model(__A,attention_mask=__A )
_lowerCamelCase : Optional[int] = model(__A )
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.positions.shape,(8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape,(8, self.batch_size, self.seq_length, 7, 2) )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : Dict = config_and_inputs
_lowerCamelCase : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = False
lowerCAmelCase_ = (EsmForProteinFolding,) if is_torch_available() else ()
lowerCAmelCase_ = ()
lowerCAmelCase_ = {} if is_torch_available() else {}
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Any = EsmFoldModelTester(self )
_lowerCamelCase : str = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
@unittest.skip("Does not support attention outputs" )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def lowerCamelCase_ ( self : int ):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def lowerCamelCase_ ( self : Dict ):
pass
@unittest.skip("ESMFold only has one output format." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold does not support input chunking." )
def lowerCamelCase_ ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def lowerCamelCase_ ( self : str ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def lowerCamelCase_ ( self : List[str] ):
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase_ ( self : Union[str, Any] ):
pass
@require_torch
class UpperCAmelCase__ ( A ):
@slow
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Any = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
_lowerCamelCase : Dict = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
_lowerCamelCase : Tuple = model(__A )["positions"]
_lowerCamelCase : Optional[int] = torch.tensor([2.5828, 0.7993, -10.9334],dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0],__A,atol=1e-4 ) ) | 702 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
print()
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 0 |
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCAmelCase_ : Any = get_logger(__name__)
class UpperCAmelCase__ :
def __init__( self : Optional[Any],__A : Optional[str] = None ):
_lowerCamelCase : List[str] = (
os.path.join(__A,config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_lowerCamelCase : int = Extractor
def lowerCamelCase_ ( self : List[str],__A : str ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
_lowerCamelCase : str = os.path.abspath(__A )
return os.path.join(self.extract_dir,hash_url_to_filename(__A ) )
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : bool ):
return force_extract or (
not os.path.isfile(__A ) and not (os.path.isdir(__A ) and os.listdir(__A ))
)
def lowerCamelCase_ ( self : Tuple,__A : str,__A : bool = False ):
_lowerCamelCase : Tuple = self.extractor.infer_extractor_format(__A )
if not extractor_format:
return input_path
_lowerCamelCase : int = self._get_output_path(__A )
if self._do_extract(__A,__A ):
self.extractor.extract(__A,__A,__A )
return output_path
class UpperCAmelCase__ ( A ):
@classmethod
@abstractmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : Union[Path, str],**__A : int ):
...
@staticmethod
@abstractmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
...
class UpperCAmelCase__ ( A , A ):
lowerCAmelCase_ = []
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : int ):
with open(__A,"rb" ) as f:
return f.read(__A )
@classmethod
def lowerCamelCase_ ( cls : int,__A : Union[Path, str],__A : bytes = b"" ):
if not magic_number:
_lowerCamelCase : int = max(len(__A ) for cls_magic_number in cls.magic_numbers )
try:
_lowerCamelCase : Any = cls.read_magic_number(__A,__A )
except OSError:
return False
return any(magic_number.startswith(__A ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase__ ( A ):
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : Union[Path, str],**__A : Optional[int] ):
return tarfile.is_tarfile(__A )
@staticmethod
def lowerCamelCase_ ( __A : Optional[Any],__A : Dict ):
def resolved(__A : str ) -> str:
return os.path.realpath(os.path.abspath(__A ) )
def badpath(__A : str,__A : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(__A,__A ) ).startswith(__A )
def badlink(__A : int,__A : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_lowerCamelCase : Union[str, Any] = resolved(os.path.join(__A,os.path.dirname(info.name ) ) )
return badpath(info.linkname,base=__A )
_lowerCamelCase : Optional[Any] = resolved(__A )
for finfo in members:
if badpath(finfo.name,__A ):
logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(__A,__A ):
logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(__A,__A ):
logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
os.makedirs(__A,exist_ok=__A )
_lowerCamelCase : int = tarfile.open(__A )
tar_file.extractall(__A,members=TarExtractor.safemembers(__A,__A ) )
tar_file.close()
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x1F\x8B']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with gzip.open(__A,"rb" ) as gzip_file:
with open(__A,"wb" ) as extracted_file:
shutil.copyfileobj(__A,__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : Union[Path, str],__A : bytes = b"" ):
if super().is_extractable(__A,magic_number=__A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(__A,"rb" ) as fp:
_lowerCamelCase : Optional[Any] = _EndRecData(__A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_lowerCamelCase : List[Any] = fp.read(__A ) # CD is where we expect it to be
if len(__A ) == sizeCentralDir:
_lowerCamelCase : List[Any] = struct.unpack(__A,__A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
os.makedirs(__A,exist_ok=__A )
with zipfile.ZipFile(__A,"r" ) as zip_file:
zip_file.extractall(__A )
zip_file.close()
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with lzma.open(__A ) as compressed_file:
with open(__A,"wb" ) as extracted_file:
shutil.copyfileobj(__A,__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(__A,exist_ok=__A )
_lowerCamelCase : int = rarfile.RarFile(__A )
rf.extractall(__A )
rf.close()
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x28\xb5\x2F\xFD']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_lowerCamelCase : Optional[Any] = zstd.ZstdDecompressor()
with open(__A,"rb" ) as ifh, open(__A,"wb" ) as ofh:
dctx.copy_stream(__A,__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x42\x5A\x68']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
with bza.open(__A,"rb" ) as compressed_file:
with open(__A,"wb" ) as extracted_file:
shutil.copyfileobj(__A,__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(__A,exist_ok=__A )
with pyazr.SevenZipFile(__A,"r" ) as archive:
archive.extractall(__A )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = [B'\x04\x22\x4D\x18']
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(__A,"rb" ) as compressed_file:
with open(__A,"wb" ) as extracted_file:
shutil.copyfileobj(__A,__A )
class UpperCAmelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
lowerCAmelCase_ = {
'tar': TarExtractor,
'gzip': GzipExtractor,
'zip': ZipExtractor,
'xz': XzExtractor,
'rar': RarExtractor,
'zstd': ZstdExtractor,
'bz2': BzipaExtractor,
'7z': SevenZipExtractor, # <Added version="2.4.0"/>
'lz4': LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCamelCase_ ( cls : Optional[Any] ):
return max(
len(__A )
for extractor in cls.extractors.values()
if issubclass(__A,__A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCamelCase_ ( __A : Union[Path, str],__A : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(__A,magic_number_length=__A )
except OSError:
return b""
@classmethod
def lowerCamelCase_ ( cls : List[str],__A : Union[Path, str],__A : bool = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead.",category=__A,)
_lowerCamelCase : Optional[Any] = cls.infer_extractor_format(__A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCamelCase_ ( cls : Optional[int],__A : Union[Path, str] ): # <Added version="2.4.0"/>
_lowerCamelCase : Optional[int] = cls._get_magic_number_max_length()
_lowerCamelCase : Union[str, Any] = cls._read_magic_number(__A,__A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(__A,magic_number=__A ):
return extractor_format
@classmethod
def lowerCamelCase_ ( cls : List[Any],__A : Union[Path, str],__A : Union[Path, str],__A : Optional[str] = None,__A : Optional[BaseExtractor] = "deprecated",):
os.makedirs(os.path.dirname(__A ),exist_ok=__A )
# Prevent parallel extractions
_lowerCamelCase : Union[str, Any] = str(Path(__A ).with_suffix(".lock" ) )
with FileLock(__A ):
shutil.rmtree(__A,ignore_errors=__A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(__A,__A ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead.",category=__A,)
_lowerCamelCase : Dict = extractor if extractor != "deprecated" else extractor_format
else:
_lowerCamelCase : str = cls.extractors[extractor_format]
return extractor.extract(__A,__A )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0.",category=__A,)
for extractor in cls.extractors.values():
if extractor.is_extractable(__A ):
return extractor.extract(__A,__A ) | 703 |
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
_lowerCamelCase : List[Any] = 1024
_lowerCamelCase : Union[str, Any] = 4096
_lowerCamelCase : List[Any] = 24
_lowerCamelCase : List[str] = 16
_lowerCamelCase : Union[str, Any] = [5, 11, 17, 23]
_lowerCamelCase : int = [256, 512, 1024, 1024]
_lowerCamelCase : Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowerCamelCase : Dict = 768
_lowerCamelCase : List[Any] = [1, 1, 1, 0.5]
_lowerCamelCase : Optional[int] = [256, 512, 768, 768]
_lowerCamelCase : str = 150
_lowerCamelCase : List[Any] = 16
_lowerCamelCase : Optional[int] = (1, 384, 384)
_lowerCamelCase : int = False
_lowerCamelCase : Tuple = "project"
if "ade" in checkpoint_url:
_lowerCamelCase : Dict = True
_lowerCamelCase : List[str] = 768
_lowerCamelCase : Optional[int] = [1, 1, 1, 0.5]
_lowerCamelCase : Dict = 150
_lowerCamelCase : Dict = 16
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Optional[int] = "ade20k-id2label.json"
_lowerCamelCase : List[Any] = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) ) , "r" ) )
_lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Tuple = idalabel
_lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[Any] = [1, 150, 480, 480]
return config, expected_shape
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowerCamelCase : str = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
_lowerCamelCase : List[str] = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
_lowerCamelCase : str = name.replace("patch_embed" , "" )
if "pos_embed" in name:
_lowerCamelCase : Optional[int] = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
_lowerCamelCase : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
_lowerCamelCase : Union[str, Any] = name.replace("proj" , "projection" )
if "blocks" in name:
_lowerCamelCase : Dict = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
_lowerCamelCase : Optional[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_lowerCamelCase : List[str] = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
_lowerCamelCase : Union[str, Any] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
_lowerCamelCase : Tuple = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
_lowerCamelCase : Any = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
_lowerCamelCase : Dict = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
_lowerCamelCase : List[Any] = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
_lowerCamelCase : str = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
_lowerCamelCase : List[Any] = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
_lowerCamelCase : int = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
_lowerCamelCase : Union[str, Any] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowerCamelCase : List[str] = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
_lowerCamelCase : Optional[Any] = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
_lowerCamelCase : List[Any] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
_lowerCamelCase : Tuple = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
_lowerCamelCase : Union[str, Any] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
_lowerCamelCase : Optional[int] = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowerCamelCase : List[str] = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowerCamelCase : int = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowerCamelCase : Dict = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowerCamelCase : int = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowerCamelCase : Tuple = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
_lowerCamelCase : List[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
_lowerCamelCase : Any = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
_lowerCamelCase : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
_lowerCamelCase : Optional[int] = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
_lowerCamelCase : str = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
_lowerCamelCase : Optional[int] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
_lowerCamelCase : List[Any] = name.replace("pretrained" , "dpt" )
if "bn" in name:
_lowerCamelCase : Union[str, Any] = name.replace("bn" , "batch_norm" )
if "head" in name:
_lowerCamelCase : List[Any] = name.replace("head" , "head.head" )
if "encoder.norm" in name:
_lowerCamelCase : Optional[Any] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
_lowerCamelCase : Union[str, Any] = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
_lowerCamelCase : int = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
_lowerCamelCase : Dict = name.replace(".." , "." )
if "stem.conv" in name:
_lowerCamelCase : Optional[int] = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
_lowerCamelCase : Optional[Any] = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
_lowerCamelCase : Optional[int] = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
_lowerCamelCase : Tuple = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
_lowerCamelCase : List[Any] = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
_lowerCamelCase : List[str] = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
_lowerCamelCase : int = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Optional[int] = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
_lowerCamelCase : int = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Any = in_proj_weight[: config.hidden_size, :]
_lowerCamelCase : int = in_proj_bias[: config.hidden_size]
_lowerCamelCase : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : Dict = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : List[str] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = get_dpt_config(_lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(_lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
_lowerCamelCase : List[Any] = state_dict.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
_lowerCamelCase : Any = DPTForSemanticSegmentation(_lowerCAmelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Check outputs on an image
_lowerCamelCase : Dict = 480 if "ade" in checkpoint_url else 384
_lowerCamelCase : Union[str, Any] = DPTImageProcessor(size=_lowerCAmelCase )
_lowerCamelCase : str = prepare_img()
_lowerCamelCase : Any = image_processor(_lowerCAmelCase , return_tensors="pt" )
# forward pass
_lowerCamelCase : Dict = model(**_lowerCAmelCase ).logits if "ade" in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth
if show_prediction:
_lowerCamelCase : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=_lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
) | 704 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : bool = False ):
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
_lowerCamelCase : Any = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
_lowerCamelCase : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCAmelCase , 1 ):
if n < _p:
# then we have our last prime to check
_lowerCamelCase : str = primes[:idx]
break
_lowerCamelCase : List[str] = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
_lowerCamelCase : int = False
for r in range(_lowerCAmelCase ):
_lowerCamelCase : Any = pow(_lowerCAmelCase , d * 2**r , _lowerCAmelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
_lowerCamelCase : Optional[Any] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def A_ ( ):
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin() | 705 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 0 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A_ ( _lowerCAmelCase : float = 0.1 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 3
_lowerCamelCase : int = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_lowerCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 706 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
_lowerCamelCase : List[Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(__A,modifier_rank=__A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 707 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
# We split QKV in separate Q,K,V
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(_lowerCAmelCase , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase__ :
def __init__( self : Tuple,__A : int ):
_lowerCamelCase : Union[str, Any] = num_of_nodes
_lowerCamelCase : list[list[int]] = []
_lowerCamelCase : dict[int, int] = {}
def lowerCamelCase_ ( self : str,__A : int,__A : int,__A : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowerCamelCase_ ( self : int,__A : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCamelCase_ ( self : Union[str, Any],__A : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCamelCase : Optional[int] = self.find_component(__A )
def lowerCamelCase_ ( self : List[Any],__A : list[int],__A : int,__A : int ):
if component_size[u_node] <= component_size[v_node]:
_lowerCamelCase : Tuple = v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCamelCase : Optional[int] = self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCamelCase : List[Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCamelCase : List[Any] = edge
_lowerCamelCase : List[str] = self.m_component[u]
_lowerCamelCase : Dict = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCamelCase : Optional[int] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A,__A ):
_lowerCamelCase : Dict = edge
_lowerCamelCase : Optional[Any] = self.m_component[u]
_lowerCamelCase : Dict = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A,__A,__A )
print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
_lowerCamelCase : Tuple = [-1] * self.m_num_of_nodes
print(f'The total weight of the minimal spanning tree is: {mst_weight}' )
def A_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod() | 708 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the ๐ค hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 0 |
def A_ ( _lowerCAmelCase : int = 600851475143 ):
"""simple docstring"""
try:
_lowerCamelCase : int = int(_lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
_lowerCamelCase : Union[str, Any] = 2
_lowerCamelCase : str = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
_lowerCamelCase : Union[str, Any] = i
while n % i == 0:
_lowerCamelCase : Optional[int] = n // i
i += 1
return int(_lowerCAmelCase )
if __name__ == "__main__":
print(f'''{solution() = }''') | 709 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Tuple ):
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),encoding="utf-8",check=__A,)
assert hasattr(self,"env" )
def lowerCamelCase_ ( self : Optional[Any],__A : int ):
_lowerCamelCase : Union[str, Any] = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
# distributed data settings
_lowerCamelCase : Tuple = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script,source_dir=self.env.test_path,role=self.env.role,image_uri=self.env.image_uri,base_job_name=__A,instance_count=__A,instance_type=self.instance_type,debugger_hook_config=__A,hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},metric_definitions=self.env.metric_definitions,distribution=__A,py_version="py36",)
def lowerCamelCase_ ( self : Dict,__A : Optional[Any] ):
TrainingJobAnalytics(__A ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
def lowerCamelCase_ ( self : Tuple,__A : Dict ):
# create estimator
_lowerCamelCase : Any = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
_lowerCamelCase : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCamelCase : Union[str, Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds",9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json',"w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss},__A )
| 710 |
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : float = 1 / 12345 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : List[str] = 3
while True:
_lowerCamelCase : List[Any] = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_lowerCAmelCase ):
_lowerCamelCase : Any = int(_lowerCAmelCase )
total_partitions += 1
if check_partition_perfect(_lowerCAmelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_lowerCAmelCase )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'detr'
lowerCAmelCase_ = ['past_key_values']
lowerCAmelCase_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int],__A : Tuple=True,__A : List[Any]=None,__A : List[str]=3,__A : Any=1_0_0,__A : Union[str, Any]=6,__A : Union[str, Any]=2_0_4_8,__A : str=8,__A : Dict=6,__A : Tuple=2_0_4_8,__A : Optional[int]=8,__A : int=0.0,__A : Union[str, Any]=0.0,__A : Optional[Any]=True,__A : Union[str, Any]="relu",__A : str=2_5_6,__A : int=0.1,__A : List[str]=0.0,__A : Union[str, Any]=0.0,__A : int=0.02,__A : Union[str, Any]=1.0,__A : str=False,__A : int="sine",__A : List[str]="resnet50",__A : List[Any]=True,__A : Tuple=False,__A : Dict=1,__A : Optional[Any]=5,__A : Optional[int]=2,__A : List[str]=1,__A : int=1,__A : str=5,__A : List[Any]=2,__A : Union[str, Any]=0.1,**__A : Union[str, Any],):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_lowerCamelCase : int = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(__A,__A ):
_lowerCamelCase : Union[str, Any] = backbone_config.get("model_type" )
_lowerCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Optional[int] = config_class.from_dict(__A )
# set timm attributes to None
_lowerCamelCase : List[Any] = None, None, None
_lowerCamelCase : Tuple = use_timm_backbone
_lowerCamelCase : Dict = backbone_config
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : Union[str, Any] = num_queries
_lowerCamelCase : Optional[Any] = d_model
_lowerCamelCase : Any = encoder_ffn_dim
_lowerCamelCase : Optional[Any] = encoder_layers
_lowerCamelCase : int = encoder_attention_heads
_lowerCamelCase : Union[str, Any] = decoder_ffn_dim
_lowerCamelCase : Dict = decoder_layers
_lowerCamelCase : Dict = decoder_attention_heads
_lowerCamelCase : Optional[Any] = dropout
_lowerCamelCase : Tuple = attention_dropout
_lowerCamelCase : Union[str, Any] = activation_dropout
_lowerCamelCase : List[Any] = activation_function
_lowerCamelCase : str = init_std
_lowerCamelCase : Dict = init_xavier_std
_lowerCamelCase : Tuple = encoder_layerdrop
_lowerCamelCase : List[str] = decoder_layerdrop
_lowerCamelCase : Optional[int] = encoder_layers
_lowerCamelCase : List[str] = auxiliary_loss
_lowerCamelCase : Optional[int] = position_embedding_type
_lowerCamelCase : Union[str, Any] = backbone
_lowerCamelCase : str = use_pretrained_backbone
_lowerCamelCase : Union[str, Any] = dilation
# Hungarian matcher
_lowerCamelCase : Any = class_cost
_lowerCamelCase : int = bbox_cost
_lowerCamelCase : Tuple = giou_cost
# Loss coefficients
_lowerCamelCase : Union[str, Any] = mask_loss_coefficient
_lowerCamelCase : Tuple = dice_loss_coefficient
_lowerCamelCase : str = bbox_loss_coefficient
_lowerCamelCase : Dict = giou_loss_coefficient
_lowerCamelCase : Any = eos_coefficient
super().__init__(is_encoder_decoder=__A,**__A )
@property
def lowerCamelCase_ ( self : Tuple ):
return self.encoder_attention_heads
@property
def lowerCamelCase_ ( self : str ):
return self.d_model
@classmethod
def lowerCamelCase_ ( cls : str,__A : PretrainedConfig,**__A : Optional[Any] ):
return cls(backbone_config=__A,**__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_lowerCamelCase : List[Any] = self.backbone_config.to_dict()
_lowerCamelCase : List[str] = self.__class__.model_type
return output
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = version.parse('1.11' )
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def lowerCamelCase_ ( self : Union[str, Any] ):
return 1e-5
@property
def lowerCamelCase_ ( self : Optional[int] ):
return 1_2 | 711 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
_lowerCamelCase : int = rename_key(_lowerCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[13.3523, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[16.1873, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the ๐ค hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 712 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase__ ( A ):
@require_torch
def lowerCamelCase_ ( self : int ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCamelCase : Optional[int] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_lowerCamelCase : Optional[int] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_lowerCamelCase : List[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_lowerCamelCase : Optional[int] = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__A )
BertModel.from_pretrained(__A )
BertTokenizer.from_pretrained(__A )
pipeline(task="fill-mask",model=__A )
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_lowerCamelCase : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase : int = "1"
_lowerCamelCase : Any = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : List[str] ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCamelCase : Any = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
_lowerCamelCase : Union[str, Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
_lowerCamelCase : Any = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
_lowerCamelCase : Dict = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__A )
BertModel.from_pretrained(__A )
BertTokenizer.from_pretrained(__A )
pipeline(task="fill-mask",model=__A )
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Union[str, Any] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
_lowerCamelCase : int = self.get_env()
_lowerCamelCase : Optional[Any] = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCamelCase : List[str] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
_lowerCamelCase : List[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
_lowerCamelCase : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Optional[int] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_lowerCamelCase : Dict = self.get_env()
_lowerCamelCase : Any = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
# next emulate no network
_lowerCamelCase : str = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase : List[Any] = "1"
_lowerCamelCase : int = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
@require_torch
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = "\nfrom transformers import pipeline\n "
_lowerCamelCase : List[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
_lowerCamelCase : str = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
_lowerCamelCase : Union[str, Any] = self.get_env()
_lowerCamelCase : Any = "1"
_lowerCamelCase : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
_lowerCamelCase : Tuple = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,1,result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode",result.stderr.decode().replace("\n","" ),)
@require_torch
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Dict = "\nfrom transformers import AutoModel\n "
_lowerCamelCase : List[Any] = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Tuple = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
_lowerCamelCase : Optional[Any] = self.get_env()
_lowerCamelCase : Tuple = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase : str = "1"
_lowerCamelCase : int = subprocess.run(__A,env=__A,check=__A,capture_output=__A )
self.assertEqual(result.returncode,0,result.stderr )
self.assertIn("success",result.stdout.decode() )
| 713 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : float ):
"""simple docstring"""
return 10 - x * x
def A_ ( _lowerCAmelCase : float , _lowerCAmelCase : float ):
"""simple docstring"""
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) >= 0:
raise ValueError("Wrong space!" )
_lowerCamelCase : List[str] = a
while (b - a) >= 0.0_1:
# Find middle point
_lowerCamelCase : Union[str, Any] = (a + b) / 2
# Check if middle point is root
if equation(_lowerCAmelCase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(_lowerCAmelCase ) * equation(_lowerCAmelCase ) < 0:
_lowerCamelCase : Union[str, Any] = c
else:
_lowerCamelCase : Any = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 0 |
'''simple docstring'''
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
for i in range(0 , _lowerCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
for _ in range(0 , i + 1 ): # printing stars
print("* " , end="" )
print()
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
for i in range(_lowerCAmelCase , 0 , -1 ):
for _ in range(_lowerCAmelCase , 0 , -1 ): # printing stars
print("* " , end="" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(" " , end="" )
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if n <= 0:
print(" ... .... nothing printing :(" )
return
floyd(_lowerCAmelCase ) # upper half
reverse_floyd(_lowerCAmelCase ) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
UpperCAmelCase_ : Any = 1
while K:
UpperCAmelCase_ : Optional[Any] = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
UpperCAmelCase_ : str = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...') | 714 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowerCamelCase : List[Any] = array[indexa], array[indexa]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
for i in range(_lowerCAmelCase , low + middle ):
comp_and_swap(_lowerCAmelCase , _lowerCAmelCase , i + middle , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
bitonic_merge(_lowerCAmelCase , low + middle , _lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 715 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
_lowerCamelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'upernet'
def __init__( self : Any,__A : Optional[Any]=None,__A : Dict=5_1_2,__A : Optional[Any]=0.02,__A : List[Any]=[1, 2, 3, 6],__A : Dict=True,__A : str=0.4,__A : Union[str, Any]=3_8_4,__A : Optional[Any]=2_5_6,__A : Optional[int]=1,__A : Any=False,__A : Dict=2_5_5,**__A : Optional[Any],):
super().__init__(**__A )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_lowerCamelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(__A,__A ):
_lowerCamelCase : List[Any] = backbone_config.get("model_type" )
_lowerCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase : Union[str, Any] = config_class.from_dict(__A )
_lowerCamelCase : List[Any] = backbone_config
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Dict = pool_scales
_lowerCamelCase : Any = use_auxiliary_head
_lowerCamelCase : Any = auxiliary_loss_weight
_lowerCamelCase : str = auxiliary_in_channels
_lowerCamelCase : str = auxiliary_channels
_lowerCamelCase : Optional[int] = auxiliary_num_convs
_lowerCamelCase : Dict = auxiliary_concat_input
_lowerCamelCase : List[Any] = loss_ignore_index
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCamelCase : List[Any] = self.backbone_config.to_dict()
_lowerCamelCase : Optional[int] = self.__class__.model_type
return output | 716 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = 'โ'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
_lowerCamelCase : Union[str, Any] = {" ", "โ", "โ", "โฏ", "โ
", "ใ", "โ", " ", "โ", "โ", "๏ฟผ", "ย"}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = {}
_lowerCamelCase : Optional[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : Any = len(example["content"] ) / len(output["input_ids"] )
return output
UpperCAmelCase_ : str = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase_ : Any = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : Any = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase_ : int = time.time()
UpperCAmelCase_ : int = load_dataset(args.dataset_name, split='train')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : List[Any] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
UpperCAmelCase_ : int = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 718 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 0 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return [ord(_lowerCAmelCase ) - 96 for elem in plain]
def A_ ( _lowerCAmelCase : list[int] ):
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = encode(input("-> " ).strip().lower() )
print("Encoded: " , _lowerCAmelCase )
print("Decoded:" , decode(_lowerCAmelCase ) )
if __name__ == "__main__":
main() | 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 0 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCAmelCase__ ( nn.Module ):
def __init__( self : str ):
super().__init__()
_lowerCamelCase : Union[str, Any] = nn.Linear(3,4 )
_lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
_lowerCamelCase : Dict = nn.Linear(4,5 )
def lowerCamelCase_ ( self : int,__A : Optional[Any] ):
return self.lineara(self.batchnorm(self.lineara(__A ) ) )
class UpperCAmelCase__ ( A ):
def lowerCamelCase_ ( self : Optional[Any],__A : List[str],*__A : List[Any],**__A : str ):
return (args[0] + 1,) + args[1:], kwargs
class UpperCAmelCase__ ( A ):
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Union[str, Any] ):
return output + 1
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[Any] = ModelForTest()
_lowerCamelCase : int = ModelHook()
add_hook_to_module(__A,__A )
self.assertEqual(test_model._hf_hook,__A )
self.assertTrue(hasattr(__A,"_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__,"forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ),["x"] )
remove_hook_from_module(__A )
self.assertFalse(hasattr(__A,"_hf_hook" ) )
self.assertFalse(hasattr(__A,"_old_forward" ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = ModelForTest()
_lowerCamelCase : Any = ModelHook()
add_hook_to_module(__A,__A )
add_hook_to_module(__A,__A,append=__A )
self.assertEqual(isinstance(test_model._hf_hook,__A ),__A )
self.assertEqual(len(test_model._hf_hook.hooks ),2 )
self.assertTrue(hasattr(__A,"_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__,"forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ),["x"] )
remove_hook_from_module(__A )
self.assertFalse(hasattr(__A,"_hf_hook" ) )
self.assertFalse(hasattr(__A,"_old_forward" ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = ModelForTest()
_lowerCamelCase : Tuple = torch.randn(2,3 )
_lowerCamelCase : Union[str, Any] = test_model(x + 1 )
_lowerCamelCase : int = test_model(x + 2 )
_lowerCamelCase : Union[str, Any] = PreForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : str = test_model(__A )
self.assertTrue(torch.allclose(__A,__A,atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_lowerCamelCase : List[str] = PreForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : List[Any] = test_model(__A )
self.assertTrue(torch.allclose(__A,__A,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCamelCase : Any = SequentialHook(PreForwardHook(),PreForwardHook() )
add_hook_to_module(__A,__A )
_lowerCamelCase : Any = test_model(__A )
assert torch.allclose(__A,__A,atol=1e-5 )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = ModelForTest()
_lowerCamelCase : Optional[int] = torch.randn(2,3 )
_lowerCamelCase : Optional[Any] = test_model(__A )
_lowerCamelCase : List[Any] = PostForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : str = test_model(__A )
self.assertTrue(torch.allclose(__A,output + 1,atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_lowerCamelCase : int = PostForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : Any = test_model(__A )
self.assertTrue(torch.allclose(__A,output + 1,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook(),PostForwardHook() )
add_hook_to_module(__A,__A )
_lowerCamelCase : Tuple = test_model(__A )
assert torch.allclose(__A,output + 2,atol=1e-5 )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Union[str, Any] = ModelForTest()
_lowerCamelCase : List[Any] = torch.randn(2,3 )
_lowerCamelCase : Union[str, Any] = test_model(__A )
_lowerCamelCase : Optional[int] = PostForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : Optional[Any] = test_model(__A )
self.assertTrue(torch.allclose(__A,output + 1 ) )
self.assertTrue(outputa.requires_grad )
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[Any] = test_model(__A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara,AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device,torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device,torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device,torch.device(0 ) )
self.assertEqual(model.lineara.weight.device,torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_lowerCamelCase : Optional[Any] = torch.randn(2,3 )
_lowerCamelCase : Dict = model(__A )
self.assertEqual(output.device,torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__A,AlignDevicesHook(io_same_device=__A ) )
_lowerCamelCase : str = torch.randn(2,3 ).to(0 )
_lowerCamelCase : Any = model(__A )
self.assertEqual(output.device,torch.device(0 ) )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# This will move each submodule on different devices
_lowerCamelCase : Optional[Any] = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara,AlignDevicesHook(**__A ) )
add_hook_to_module(model.batchnorm,AlignDevicesHook(**__A ) )
add_hook_to_module(model.lineara,AlignDevicesHook(**__A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : Dict = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device,__A )
_lowerCamelCase : int = torch.randn(2,3 )
_lowerCamelCase : Optional[int] = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# Now test with buffers included in the offload
_lowerCamelCase : Union[str, Any] = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara,AlignDevicesHook(**__A ) )
add_hook_to_module(model.batchnorm,AlignDevicesHook(**__A ) )
add_hook_to_module(model.lineara,AlignDevicesHook(**__A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device,torch.device("meta" ) )
_lowerCamelCase : Dict = torch.randn(2,3 )
_lowerCamelCase : List[Any] = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# This will move each submodule on different devices
_lowerCamelCase : Union[str, Any] = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(__A,execution_device=__A,offload=__A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : Optional[int] = torch.device(__A )
self.assertEqual(model.batchnorm.running_mean.device,__A )
_lowerCamelCase : Optional[int] = torch.randn(2,3 )
_lowerCamelCase : Any = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__A,execution_device=__A,offload=__A,offload_buffers=__A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device,torch.device("meta" ) )
_lowerCamelCase : Optional[int] = torch.randn(2,3 )
_lowerCamelCase : int = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# This will move each submodule on different devices
_lowerCamelCase : Tuple = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
__A,execution_device=__A,offload=__A,weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : Dict = torch.device(__A )
self.assertEqual(model.batchnorm.running_mean.device,__A )
_lowerCamelCase : Optional[Any] = torch.randn(2,3 )
_lowerCamelCase : List[Any] = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__A,execution_device=__A,offload=__A,weights_map=model.state_dict(),offload_buffers=__A,)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device,torch.device("meta" ) )
_lowerCamelCase : List[Any] = torch.randn(2,3 )
_lowerCamelCase : Optional[int] = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) ) | 720 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = DistilBertTokenizer
lowerCAmelCase_ = DistilBertTokenizerFast
lowerCAmelCase_ = True
@slow
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
_lowerCamelCase : str = tokenizer.encode("sequence builders",add_special_tokens=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode("multi-sequence build",add_special_tokens=__A )
_lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__A )
_lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__A,__A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
] | 721 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
# Load configuration defined in the metadata file
with open(__lowerCAmelCase ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.load(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = LukeConfig(use_entity_aware_attention=__lowerCAmelCase , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : Dict = load_original_entity_vocab(__lowerCAmelCase )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE__ : Dict = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken("""<ent>""" , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken("""<ent2>""" , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) , """r""" ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.load(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = """MLukeTokenizer"""
with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = MLukeTokenizer.from_pretrained(__lowerCAmelCase )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
SCREAMING_SNAKE_CASE__ : Any = state_dict["""embeddings.word_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ : int = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE__ : List[str] = state_dict[bias_name]
SCREAMING_SNAKE_CASE__ : List[str] = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = F'''encoder.layer.{layer_index}.attention.self.'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : Tuple = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict["""entity_predictions.bias"""]
SCREAMING_SNAKE_CASE__ : Optional[int] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Any = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE__ : Optional[int] = LukeForMaskedLM(config=__lowerCAmelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
SCREAMING_SNAKE_CASE__ : str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict[key]
else:
SCREAMING_SNAKE_CASE__ : Dict = state_dict[key]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
if set(__lowerCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__lowerCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = MLukeTokenizer.from_pretrained(__lowerCAmelCase , task="""entity_classification""" )
SCREAMING_SNAKE_CASE__ : Tuple = """ISO 639-3 uses the code fas for the dialects spoken across Iran and ใขใใฌใในใฟใณ (Afghanistan)."""
SCREAMING_SNAKE_CASE__ : str = (0, 9)
SCREAMING_SNAKE_CASE__ : int = tokenizer(__lowerCAmelCase , entity_spans=[span] , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**__lowerCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 33, 768) )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size((1, 1, 768) )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE__ : Tuple = MLukeTokenizer.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = """Tokyo is the capital of <mask>."""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (24, 30)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer(__lowerCAmelCase , entity_spans=[span] , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = encoding["""input_ids"""][0].tolist()
SCREAMING_SNAKE_CASE__ : str = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
SCREAMING_SNAKE_CASE__ : Dict = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE__ : List[str] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(__lowerCAmelCase ) )
model.save_pretrained(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] = ["""[MASK]""", """[PAD]""", """[UNK]"""]
SCREAMING_SNAKE_CASE__ : List[str] = [json.loads(__lowerCAmelCase ) for line in open(__lowerCAmelCase )]
SCREAMING_SNAKE_CASE__ : int = {}
for entry in data:
SCREAMING_SNAKE_CASE__ : List[Any] = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE__ : Optional[Any] = entity_id
break
SCREAMING_SNAKE_CASE__ : List[Any] = F'''{language}:{entity_name}'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = entity_id
return new_mapping
if __name__ == "__main__":
a :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
a :int = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 12 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a :str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a :int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a :Dict = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a :List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a :str = "allenai"
def _lowercase ( __lowerCAmelCase ) -> Any:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
SCREAMING_SNAKE_CASE__ : str = dict((re.sub(r"""@@$""" , """""" , __lowerCAmelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , __lowerCAmelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE__ : Tuple = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d[k] # restore
return da
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
# prep
assert os.path.exists(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
SCREAMING_SNAKE_CASE__ : Optional[Any] = basename(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dirname(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE__ : Optional[int] = cls.hub_models()
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
SCREAMING_SNAKE_CASE__ : Optional[Any] = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hub_utils.from_pretrained(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , archive_map=__lowerCAmelCase , **__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = vars(chkpt["""args"""]["""model"""] )
SCREAMING_SNAKE_CASE__ : Any = args["""source_lang"""]
SCREAMING_SNAKE_CASE__ : Any = args["""target_lang"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = dirname(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = basename(__lowerCAmelCase )
# dicts
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(__lowerCAmelCase , F'''dict.{src_lang}.txt''' )
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , F'''dict.{tgt_lang}.txt''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dictionary.load(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE__ : Optional[int] = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = os.path.join(__lowerCAmelCase , """vocab-src.json""" )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE__ : Tuple = False
break
SCREAMING_SNAKE_CASE__ : Optional[Any] = Dictionary.load(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(__lowerCAmelCase , """vocab-tgt.json""" )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ):
break
with open(__lowerCAmelCase , encoding="""utf-8""" ) as fin:
SCREAMING_SNAKE_CASE__ : Any = fin.read()
SCREAMING_SNAKE_CASE__ : Tuple = re.sub(r""" \d+$""" , """""" , __lowerCAmelCase , 0 , re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as fout:
fout.write(__lowerCAmelCase )
# model config
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(__lowerCAmelCase , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args['tokenizer']}'''
SCREAMING_SNAKE_CASE__ : str = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE__ : Tuple = 5
SCREAMING_SNAKE_CASE__ : str = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE__ : Tuple = best_score_hparams[model_dir]["""length_penalty"""]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# model
SCREAMING_SNAKE_CASE__ : Dict = chkpt["""models"""][0]
SCREAMING_SNAKE_CASE__ : int = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE__ : Tuple = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE__ : str = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = FSMTConfig.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FSMTForConditionalGeneration(__lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
# save
SCREAMING_SNAKE_CASE__ : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
a :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a :List[str] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 12 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
a :Dict = logging.get_logger(__name__)
a :Dict = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
a :Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
a :Tuple = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Any = """whisper"""
_SCREAMING_SNAKE_CASE :Optional[Any] = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE :Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , _a=51_865 , _a=80 , _a=6 , _a=4 , _a=6 , _a=4 , _a=1_536 , _a=1_536 , _a=0.0 , _a=0.0 , _a=50_257 , _a=True , _a=True , _a="gelu" , _a=256 , _a=0.0 , _a=0.0 , _a=0.0 , _a=0.02 , _a=False , _a=1_500 , _a=448 , _a=50_256 , _a=50_256 , _a=50_256 , _a=None , _a=[220, 50_256] , _a=False , _a=256 , _a=False , _a=0.05 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a=7 , **_a , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_mel_bins
SCREAMING_SNAKE_CASE__ : List[Any] = d_model
SCREAMING_SNAKE_CASE__ : int = encoder_layers
SCREAMING_SNAKE_CASE__ : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = decoder_layers
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Tuple = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Tuple = dropout
SCREAMING_SNAKE_CASE__ : int = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = activation_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = activation_function
SCREAMING_SNAKE_CASE__ : Optional[Any] = init_std
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[str] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE__ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : Any = max_source_positions
SCREAMING_SNAKE_CASE__ : Optional[int] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = classifier_proj_size
SCREAMING_SNAKE_CASE__ : List[str] = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ : Optional[int] = apply_spec_augment
SCREAMING_SNAKE_CASE__ : Optional[Any] = mask_time_prob
SCREAMING_SNAKE_CASE__ : Dict = mask_time_length
SCREAMING_SNAKE_CASE__ : Any = mask_time_min_masks
SCREAMING_SNAKE_CASE__ : Any = mask_feature_prob
SCREAMING_SNAKE_CASE__ : Any = mask_feature_length
SCREAMING_SNAKE_CASE__ : Dict = mask_feature_min_masks
SCREAMING_SNAKE_CASE__ : str = median_filter_width
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , decoder_start_token_id=_a , suppress_tokens=_a , begin_suppress_tokens=_a , **_a , )
class __a (UpperCamelCase_):
'''simple docstring'''
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {0: """batch"""}
else:
SCREAMING_SNAKE_CASE__ : str = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_a , direction="""inputs""" )
return common_inputs
def _a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , _a = 22_050 , _a = 5.0 , _a = 220 , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = OrderedDict()
SCREAMING_SNAKE_CASE__ : List[Any] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=_a , framework=_a , sampling_rate=_a , time_duration=_a , frequency=_a , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_inputs["""input_features"""].shape[2]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_sequence_length // 2 if self.use_past else seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().generate_dummy_inputs(
preprocessor.tokenizer , _a , _a , _a , _a )
SCREAMING_SNAKE_CASE__ : Any = encoder_inputs.pop("""input_features""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _a ( self ) -> float:
"""simple docstring"""
return 1E-3
| 12 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = (DDPMScheduler,)
def _a ( self , **_a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_a )
return config
def _a ( self ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def _a ( self ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def _a ( self ) -> Any:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def _a ( self ) -> int:
"""simple docstring"""
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def _a ( self ) -> str:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def _a ( self ) -> str:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Any = len(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : str = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , _a )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ : int = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE__ : str = pred_prev_sample
SCREAMING_SNAKE_CASE__ : str = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_scheduler_config(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Dict = len(_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE__ : int = model(_a , _a )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
SCREAMING_SNAKE_CASE__ : Tuple = pred_prev_sample
SCREAMING_SNAKE_CASE__ : Any = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = -1
else:
SCREAMING_SNAKE_CASE__ : Tuple = timesteps[i + 1]
SCREAMING_SNAKE_CASE__ : int = scheduler.previous_timestep(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prev_t.item()
self.assertEqual(_a , _a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_a , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[str] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : int = [100, 87, 50, 1, 0]
SCREAMING_SNAKE_CASE__ : List[str] = len(_a )
with self.assertRaises(_a , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_a )
| 12 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Any = IFPipeline
_SCREAMING_SNAKE_CASE :str = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
_SCREAMING_SNAKE_CASE :Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
_SCREAMING_SNAKE_CASE :Any = PipelineTesterMixin.required_optional_params - {"""latents"""}
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def _a ( self , _a , _a=0 ) -> int:
"""simple docstring"""
if str(_a ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(_a )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Generator(device=_a ).manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _a ( self ) -> List[Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self ) -> Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def _a ( self ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _a ( self ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : str = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_a , tokenizer=_a )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_a , _a , _a , _a )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Dict = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_a , _a , _a , _a )
def _a ( self , _a , _a , _a , _a ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , num_inference_steps=2 , generator=_a , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , generator=_a , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_a , _a )
def _a ( self , _a , _a , _a , _a ) -> Optional[Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , num_inference_steps=2 , generator=_a , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Any = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE__ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_a , _a )
def _a ( self , _a , _a , _a , _a ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , num_inference_steps=2 , generator=_a , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(_a , _a )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = pipe_a(
prompt_embeds=_a , negative_prompt_embeds=_a , image=_a , mask_image=_a , original_image=_a , generator=_a , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Any = output.images[0]
assert image.shape == (256, 256, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(_a , _a )
def _lowercase ( ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 12 |
"""simple docstring"""
import os
a :List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def _lowercase ( __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Dict = 0
while index < len(__lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE__ : List[Any] = SYMBOLS[numerals[index]]
SCREAMING_SNAKE_CASE__ : Dict = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] = """"""
SCREAMING_SNAKE_CASE__ : int = num // 1000
numerals += m_count * "M"
num %= 1000
SCREAMING_SNAKE_CASE__ : List[str] = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
SCREAMING_SNAKE_CASE__ : List[Any] = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def _lowercase ( __lowerCAmelCase = "/p089_roman.txt" ) -> int:
SCREAMING_SNAKE_CASE__ : int = 0
with open(os.path.dirname(__lowerCAmelCase ) + roman_numerals_filename ) as filea:
SCREAMING_SNAKE_CASE__ : str = filea.readlines()
for line in lines:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = line.strip()
SCREAMING_SNAKE_CASE__ : Dict = parse_roman_numerals(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = generate_roman_numerals(__lowerCAmelCase )
savings += len(__lowerCAmelCase ) - len(__lowerCAmelCase )
return savings
if __name__ == "__main__":
print(f'{solution() = }')
| 12 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
SCREAMING_SNAKE_CASE__ : Dict = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __a (unittest.TestCase):
'''simple docstring'''
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE__ : List[str] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _a )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 12 | 1 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a :Optional[Any] = logging.get_logger(__name__)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple[int, int]:
def constraint_to_multiple_of(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0 , __lowerCAmelCase=None ):
SCREAMING_SNAKE_CASE__ : List[Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE__ : Optional[Any] = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE__ : Dict = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE__ : List[str] = (output_size, output_size) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else output_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = get_image_size(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_height / input_height
SCREAMING_SNAKE_CASE__ : str = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE__ : List[Any] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE__ : List[Any] = scale_height
SCREAMING_SNAKE_CASE__ : Tuple = constraint_to_multiple_of(scale_height * input_height , multiple=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__lowerCAmelCase )
return (new_height, new_width)
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = ["""pixel_values"""]
def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = False , _a = 1 , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ) -> None:
"""simple docstring"""
super().__init__(**_a )
SCREAMING_SNAKE_CASE__ : int = size if size is not None else {"""height""": 384, """width""": 384}
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : Tuple = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : int = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : str = resample
SCREAMING_SNAKE_CASE__ : str = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : Any = do_normalize
SCREAMING_SNAKE_CASE__ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a ( self , _a , _a , _a = False , _a = 1 , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : List[Any] = get_resize_output_image_size(
_a , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=_a , multiple=_a , )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def _a ( self , _a , _a , _a = None , **_a , ) -> int:
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def _a ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray:
"""simple docstring"""
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def _a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : str = get_size_dict(_a )
SCREAMING_SNAKE_CASE__ : str = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Any = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : int = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Tuple = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.resize(image=_a , size=_a , resample=_a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : int = [self.normalize(image=_a , mean=_a , std=_a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [to_channel_dimension_format(_a , _a ) for image in images]
SCREAMING_SNAKE_CASE__ : Any = {"""pixel_values""": images}
return BatchFeature(data=_a , tensor_type=_a )
def _a ( self , _a , _a = None ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_a ) != len(_a ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(_a ):
SCREAMING_SNAKE_CASE__ : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_a ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_a )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 12 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a :List[Any] = logging.get_logger(__name__)
a :Optional[int] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class __a (UpperCamelCase_ , UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Any = """focalnet"""
def __init__( self , _a=224 , _a=4 , _a=3 , _a=96 , _a=False , _a=[192, 384, 768, 768] , _a=[2, 2, 6, 2] , _a=[2, 2, 2, 2] , _a=[3, 3, 3, 3] , _a="gelu" , _a=4.0 , _a=0.0 , _a=0.1 , _a=False , _a=1E-4 , _a=False , _a=False , _a=False , _a=0.02 , _a=1E-5 , _a=32 , _a=None , _a=None , **_a , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE__ : str = patch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = embed_dim
SCREAMING_SNAKE_CASE__ : List[str] = use_conv_embed
SCREAMING_SNAKE_CASE__ : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE__ : Optional[int] = depths
SCREAMING_SNAKE_CASE__ : Any = focal_levels
SCREAMING_SNAKE_CASE__ : Optional[Any] = focal_windows
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = drop_path_rate
SCREAMING_SNAKE_CASE__ : str = use_layerscale
SCREAMING_SNAKE_CASE__ : int = layerscale_value
SCREAMING_SNAKE_CASE__ : Optional[int] = use_post_layernorm
SCREAMING_SNAKE_CASE__ : Any = use_post_layernorm_in_modulation
SCREAMING_SNAKE_CASE__ : Union[str, Any] = normalize_modulator
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : Any = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = encoder_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
| 12 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
a :Dict = logging.getLogger(__name__)
a :int = {"facebook/bart-base": BartForConditionalGeneration}
a :List[str] = {"facebook/bart-base": BartTokenizer}
def _lowercase ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=__lowerCAmelCase , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=__lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__lowerCAmelCase , )
parser.add_argument(
"""--config_name""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=__lowerCAmelCase , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="""Where to store the final ONNX file.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
return args
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase="cpu" ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = model_dict[model_name].from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase )
if model_name in ["facebook/bart-base"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
return huggingface_model, tokenizer
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : str = torch.jit.script(BARTBeamSearchGenerator(__lowerCAmelCase ) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = """My friends are cool but they eat too many carbs."""
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="""pt""" ).to(model.device )
SCREAMING_SNAKE_CASE__ : int = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=__lowerCAmelCase , max_length=__lowerCAmelCase , early_stopping=__lowerCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
__lowerCAmelCase , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , __lowerCAmelCase , opset_version=14 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=__lowerCAmelCase , )
logger.info("""Model exported to {}""".format(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = remove_dup_initializers(os.path.abspath(__lowerCAmelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Dict = onnxruntime.InferenceSession(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = ort_sess.run(
__lowerCAmelCase , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(__lowerCAmelCase ),
"""max_length""": np.array(__lowerCAmelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _lowercase ( ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parse_args()
SCREAMING_SNAKE_CASE__ : List[Any] = 5
SCREAMING_SNAKE_CASE__ : Dict = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE__ : Tuple = torch.device(args.device )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = load_model_tokenizer(args.model_name_or_path , __lowerCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(__lowerCAmelCase )
if args.max_length:
SCREAMING_SNAKE_CASE__ : Dict = args.max_length
if args.num_beams:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.num_beams
if args.output_file_path:
SCREAMING_SNAKE_CASE__ : List[Any] = args.output_file_path
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 12 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __a (unittest.TestCase):
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_attention_mask
SCREAMING_SNAKE_CASE__ : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : int = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Dict = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_choices
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : int = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Any = True
_SCREAMING_SNAKE_CASE :Optional[Any] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxRoFormerModelTester(self )
@slow
def _a ( self ) -> int:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a )
SCREAMING_SNAKE_CASE__ : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __a (unittest.TestCase):
'''simple docstring'''
@slow
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
SCREAMING_SNAKE_CASE__ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE__ : str = model(_a )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = 50_000
SCREAMING_SNAKE_CASE__ : Optional[Any] = (1, 6, vocab_size)
self.assertEqual(output.shape , _a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
| 12 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = LxmertTokenizer
_SCREAMING_SNAKE_CASE :List[Any] = LxmertTokenizerFast
_SCREAMING_SNAKE_CASE :Optional[Any] = True
_SCREAMING_SNAKE_CASE :List[Any] = True
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ : str = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _a ( self , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE__ : str = """unwanted, running"""
return input_text, output_text
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )
def _a ( self ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = """I was born in 92000, and this is falsรฉ."""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.tokenize(_a )
SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
SCREAMING_SNAKE_CASE__ : int = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(_a )
SCREAMING_SNAKE_CASE__ : int = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 12 |
"""simple docstring"""
a :List[str] = [
(1_000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def _lowercase ( __lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : List[str] = 0
while place < len(__lowerCAmelCase ):
if (place + 1 < len(__lowerCAmelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def _lowercase ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__ : Any = []
for arabic, roman in ROMAN:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : List[str] = divmod(__lowerCAmelCase , __lowerCAmelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 1 |
"""simple docstring"""
# Imports
import numpy as np
class __a :
'''simple docstring'''
def __init__( self , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Dict:
"""simple docstring"""
self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a )
def _a ( self , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Optional[Any]:
"""simple docstring"""
if red is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = red
if green is not None:
SCREAMING_SNAKE_CASE__ : Tuple = green
if blue is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = blue
if red_edge is not None:
SCREAMING_SNAKE_CASE__ : List[str] = red_edge
if nir is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = nir
return True
def _a ( self , _a="" , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Optional[Any]:
"""simple docstring"""
self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a )
SCREAMING_SNAKE_CASE__ : Dict = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _a ( self ) -> Optional[int]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def _a ( self ) -> List[Any]:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def _a ( self ) -> int:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _a ( self ) -> List[str]:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def _a ( self ) -> List[str]:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def _a ( self ) -> Dict:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _a ( self ) -> List[Any]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _a ( self , _a=0.08 , _a=1.22 , _a=0.03 ) -> List[str]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _a ( self ) -> List[str]:
"""simple docstring"""
return (self.nir / self.green) - 1
def _a ( self ) -> Tuple:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def _a ( self ) -> List[Any]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _a ( self ) -> Tuple:
"""simple docstring"""
return self.nir - self.green
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def _a ( self , _a=0.16 ) -> str:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def _a ( self , _a=0.5 ) -> Optional[Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _a ( self ) -> List[Any]:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def _a ( self , _a=None , _a=None ) -> Optional[int]:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def _a ( self ) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def _a ( self ) -> int:
"""simple docstring"""
return self.nir / self.red
def _a ( self ) -> Dict:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def _a ( self ) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _a ( self ) -> List[str]:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def _a ( self ) -> Tuple:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def _a ( self ) -> Optional[int]:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def _a ( self ) -> str:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def _a ( self ) -> List[Any]:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
SCREAMING_SNAKE_CASE__ : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _a ( self ) -> List[Any]:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _a ( self ) -> Dict:
"""simple docstring"""
return self.nir / self.red
def _a ( self ) -> Optional[int]:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a :Any = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :Union[str, Any] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :Optional[Any] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :List[Any] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
a :Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 12 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Dict = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = (32, 32)
SCREAMING_SNAKE_CASE__ : Any = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
@property
def _a ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def _a ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _a ( self ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(_a )
@property
def _a ( self ) -> Optional[int]:
"""simple docstring"""
def extract(*_a , **_a ):
class __a :
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = torch.ones([0] )
def _a ( self , _a ) -> List[str]:
"""simple docstring"""
self.pixel_values.to(_a )
return self
return Out()
return extract
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
SCREAMING_SNAKE_CASE__ : str = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : Dict = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ : List[str] = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : int = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = sd_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=_a , )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : int = PNDMScheduler(skip_prk_steps=_a )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ : int = output.images
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=_a , )[0]
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Dict = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=_a )
assert isinstance(_a , _a )
assert isinstance(pipe.scheduler , _a )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE__ : Dict = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Dict = StableDiffusionPipeline.from_pretrained(_a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE__ : Dict = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PNDMScheduler(skip_prk_steps=_a )
SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
SCREAMING_SNAKE_CASE__ : Optional[int] = unet.half()
SCREAMING_SNAKE_CASE__ : Tuple = vae.half()
SCREAMING_SNAKE_CASE__ : Dict = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
SCREAMING_SNAKE_CASE__ : Any = 4_003_660_346
SCREAMING_SNAKE_CASE__ : str = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : int = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : Tuple = output.images
SCREAMING_SNAKE_CASE__ : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Tuple = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
SCREAMING_SNAKE_CASE__ : int = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : int = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=_a )
SCREAMING_SNAKE_CASE__ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : List[str] = """padme amidala taking a bath artwork, safe for work, no nudity"""
SCREAMING_SNAKE_CASE__ : Tuple = 2_734_971_755
SCREAMING_SNAKE_CASE__ : int = 7
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : str = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : str = output.images
SCREAMING_SNAKE_CASE__ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Dict = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images
SCREAMING_SNAKE_CASE__ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Dict = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Any = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_044_355_234
SCREAMING_SNAKE_CASE__ : List[str] = 12
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 12 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_a , _a )
def _a ( self , **_a ) -> List[Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def _a ( self , **_a ) -> List[Any]:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def _a ( self , **_a ) -> Any:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
SCREAMING_SNAKE_CASE__ : int = AlignProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : Dict = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(_a , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(images=_a , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = """lower newer"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(text=_a )
SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """lower newer"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ : List[Any] = processor.batch_decode(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Dict = AlignProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = """lower newer"""
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[str] = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 12 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> int:
if a < 0:
raise ValueError("""Input value must be a positive integer""" )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError("""Input value must be a 'int' type""" )
return bin(__lowerCAmelCase ).count("""1""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a :Optional[Any] = logging.get_logger(__name__)
a :Union[str, Any] = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = """t5"""
_SCREAMING_SNAKE_CASE :List[str] = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE :Any = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , _a=32_128 , _a=512 , _a=64 , _a=2_048 , _a=6 , _a=None , _a=8 , _a=32 , _a=128 , _a=0.1 , _a=1E-6 , _a=1.0 , _a="relu" , _a=True , _a=True , _a=0 , _a=1 , **_a , ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : int = d_kv
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_ff
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_layers
SCREAMING_SNAKE_CASE__ : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE__ : Tuple = num_heads
SCREAMING_SNAKE_CASE__ : Dict = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : str = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_factor
SCREAMING_SNAKE_CASE__ : Tuple = feed_forward_proj
SCREAMING_SNAKE_CASE__ : str = use_cache
SCREAMING_SNAKE_CASE__ : List[str] = self.feed_forward_proj.split("""-""" )
SCREAMING_SNAKE_CASE__ : Dict = act_info[-1]
SCREAMING_SNAKE_CASE__ : str = act_info[0] == """gated"""
if len(_a ) > 1 and act_info[0] != "gated" or len(_a ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE__ : List[Any] = """gelu_new"""
super().__init__(
pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , **_a , )
class __a (UpperCamelCase_):
'''simple docstring'''
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
SCREAMING_SNAKE_CASE__ : Tuple = """past_encoder_sequence + sequence"""
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: """batch"""}
SCREAMING_SNAKE_CASE__ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE__ : str = {0: """batch""", 1: """decoder_sequence"""}
SCREAMING_SNAKE_CASE__ : Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_a , direction="""inputs""" )
return common_inputs
@property
def _a ( self ) -> int:
"""simple docstring"""
return 13
| 12 | 1 |
"""simple docstring"""
import unittest
import numpy as np
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , ) -> np.ndarray:
SCREAMING_SNAKE_CASE__ : Any = np.shape(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = np.shape(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = np.shape(__lowerCAmelCase )
if shape_a[0] != shape_b[0]:
SCREAMING_SNAKE_CASE__ : int = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(__lowerCAmelCase )
if shape_b[1] != shape_c[1]:
SCREAMING_SNAKE_CASE__ : Any = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = pseudo_inv
if a_inv is None:
try:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.linalg.inv(__lowerCAmelCase )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE__ : str = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([[2, 1], [6, 3]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = schur_complement(_a , _a , _a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.block([[a, b], [b.T, c]] )
SCREAMING_SNAKE_CASE__ : int = np.linalg.det(_a )
SCREAMING_SNAKE_CASE__ : str = np.linalg.det(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.linalg.det(_a )
self.assertAlmostEqual(_a , det_a * det_s )
def _a ( self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE__ : int = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE__ : List[Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_a ):
schur_complement(_a , _a , _a )
def _a ( self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
SCREAMING_SNAKE_CASE__ : int = np.array([[0, 3], [3, 0], [2, 3]] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_a ):
schur_complement(_a , _a , _a )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 12 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
a :Optional[Any] = [8, 5, 9, 7]
a :List[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
a :int = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = claim_vector
SCREAMING_SNAKE_CASE__ : Any = allocated_resources_table
SCREAMING_SNAKE_CASE__ : Any = maximum_claim_table
def _a ( self ) -> list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _a ( self ) -> list[int]:
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _a ( self ) -> list[list[int]]:
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(_a ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _a ( self ) -> dict[int, list[int]]:
"""simple docstring"""
return {self.__need().index(_a ): i for i in self.__need()}
def _a ( self , **_a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.__need()
SCREAMING_SNAKE_CASE__ : Any = self.__allocated_resources_table
SCREAMING_SNAKE_CASE__ : Dict = self.__available_resources()
SCREAMING_SNAKE_CASE__ : Dict = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("""_""" * 50 + """\n""" )
while need_list:
SCREAMING_SNAKE_CASE__ : List[str] = False
for each_need in need_list:
SCREAMING_SNAKE_CASE__ : Dict = True
for index, need in enumerate(_a ):
if need > available_resources[index]:
SCREAMING_SNAKE_CASE__ : Optional[int] = False
break
if execution:
SCREAMING_SNAKE_CASE__ : Any = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
SCREAMING_SNAKE_CASE__ : Tuple = original_need_index
print(f'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(_a )
# update available/freed resources stack
SCREAMING_SNAKE_CASE__ : Dict = np.array(_a ) + np.array(
alloc_resources_table[process_number] )
print(
"""Updated available resource stack for processes: """
+ """ """.join([str(_a ) for x in available_resources] ) )
break
if safe:
print("""The process is in a safe state.\n""" )
else:
print("""System in unsafe state. Aborting...\n""" )
break
def _a ( self ) -> Any:
"""simple docstring"""
print(""" """ * 9 + """Allocated Resource Table""" )
for item in self.__allocated_resources_table:
print(
f'''P{self.__allocated_resources_table.index(_a ) + 1}'''
+ """ """.join(f'''{it:>8}''' for it in item )
+ """\n""" )
print(""" """ * 9 + """System Resource Table""" )
for item in self.__maximum_claim_table:
print(
f'''P{self.__maximum_claim_table.index(_a ) + 1}'''
+ """ """.join(f'''{it:>8}''' for it in item )
+ """\n""" )
print(
"""Current Usage by Active Processes: """
+ """ """.join(str(_a ) for x in self.__claim_vector ) )
print(
"""Initial Available Resources: """
+ """ """.join(str(_a ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 1 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = (CMStochasticIterativeScheduler,)
_SCREAMING_SNAKE_CASE :Tuple = 10
def _a ( self , **_a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**_a )
return config
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = 10
SCREAMING_SNAKE_CASE__ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
SCREAMING_SNAKE_CASE__ : Any = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE__ : int = self.dummy_sample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_a , _a , _a ).prev_sample
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.step(_a , _a , _a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _a ( self ) -> List[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def _a ( self ) -> Tuple:
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : List[Any] = 1
scheduler.set_timesteps(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.timesteps
SCREAMING_SNAKE_CASE__ : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
SCREAMING_SNAKE_CASE__ : List[Any] = model(_a , _a )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
SCREAMING_SNAKE_CASE__ : List[str] = pred_prev_sample
SCREAMING_SNAKE_CASE__ : Tuple = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : str = [106, 0]
scheduler.set_timesteps(timesteps=_a )
SCREAMING_SNAKE_CASE__ : str = scheduler.timesteps
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.scale_model_input(_a , _a )
# 2. predict noise residual
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , _a )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
SCREAMING_SNAKE_CASE__ : Any = pred_prev_sample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.sum(torch.abs(_a ) )
SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : str = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : List[Any] = [39, 30, 12, 15, 0]
with self.assertRaises(_a , msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_a )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE__ : Optional[int] = len(_a )
with self.assertRaises(_a , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Tuple = scheduler_class(**_a )
SCREAMING_SNAKE_CASE__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_a )
| 12 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a :List[Any] = None
a :Optional[int] = logging.get_logger(__name__)
a :Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a :Optional[int] = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
a :Dict = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
a :int = "โ"
# Segments (not really needed)
a :Dict = 0
a :Optional[int] = 1
a :Tuple = 2
a :List[str] = 3
a :Optional[Any] = 4
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Tuple = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE :str = """left"""
_SCREAMING_SNAKE_CASE :Optional[Any] = XLNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = do_lower_case
SCREAMING_SNAKE_CASE__ : List[str] = remove_space
SCREAMING_SNAKE_CASE__ : int = keep_accents
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Tuple = False if not self.vocab_file else True
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a ( self , _a , _a = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 12 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=1_00_00 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(default=2E-4 , metadata={"""help""": """Learning rate fo training."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=7_50 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=16 , metadata={"""help""": """Number of gradient accumulation steps."""})
_SCREAMING_SNAKE_CASE :Optional[bool] = field(
default=UpperCamelCase_ , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=5_00_00 , metadata={"""help""": """Maximum number of training steps."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=10_24 , metadata={"""help""": """Sequence lengths used for training."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=10_24 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default=UpperCamelCase_ , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""})
_SCREAMING_SNAKE_CASE :Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """If True the data is pretokenized."""})
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=10_24 , metadata={"""help""": """Length of sequences to be evaluated."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""})
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=UpperCamelCase_ , metadata={"""help""": """Number of workers used for code evaluation."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=UpperCamelCase_ , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
_SCREAMING_SNAKE_CASE :Optional[bool] = field(
default=UpperCamelCase_ , metadata={"""help""": """Sample from the language model's output distribution."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=2_56 , metadata={"""help""": """Maximum number of newly generated tokens."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=10 , metadata={"""help""": """Number of generations to run in parallel."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=2_00 , metadata={"""help""": """Number of completions to generate for each sample."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""eval_results.json""" , metadata={"""help""": """Random seed used for evaluation."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=UpperCamelCase_ , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot-clean""" , metadata={"""help""": """Folder to save processed processed dataset."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=10_00_00 , metadata={"""help""": """Number of files to save per JSON output file."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=10_00 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=1_00 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
_SCREAMING_SNAKE_CASE :Optional[bool] = field(
default=UpperCamelCase_ , metadata={"""help""": """If True, near-duplicate samples are removed."""})
_SCREAMING_SNAKE_CASE :Optional[float] = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""})
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=20_00_00 , metadata={"""help""": """Number of examples to train tokenizer on."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(
default=3_27_68 , metadata={"""help""": """Number of examples to train the tokenizer on."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""})
_SCREAMING_SNAKE_CASE :Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """Push saved tokenizer to the hub."""})
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""})
_SCREAMING_SNAKE_CASE :Optional[int] = field(default=UpperCamelCase_ , metadata={"""help""": """Number of workers used for code evaluation."""})
@dataclass
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""})
_SCREAMING_SNAKE_CASE :Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""})
_SCREAMING_SNAKE_CASE :Optional[bool] = field(default=UpperCamelCase_ , metadata={"""help""": """Push saved tokenizer to the hub."""})
| 12 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(__lowerCAmelCase ) + 1
SCREAMING_SNAKE_CASE__ : int = len(__lowerCAmelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
SCREAMING_SNAKE_CASE__ : Dict = [[0 for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )]
# since string of zero length match pattern of zero length
SCREAMING_SNAKE_CASE__ : Dict = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : int = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __lowerCAmelCase ):
for j in range(1 , __lowerCAmelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
SCREAMING_SNAKE_CASE__ : Any = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
SCREAMING_SNAKE_CASE__ : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
SCREAMING_SNAKE_CASE__ : List[Any] = dp[i - 1][j]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
else:
SCREAMING_SNAKE_CASE__ : Dict = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
a :Any = "aab"
a :Optional[Any] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'{input_string} matches the given pattern {pattern}')
else:
print(f'{input_string} does not match with the given pattern {pattern}')
| 12 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a :Optional[int] = 2
class __a :
'''simple docstring'''
def __init__( self , *, # begin keyword-only arguments
_a="<s>" , _a="<pad>" , _a="</s>" , _a="<unk>" , _a=None , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = bos, unk, pad, eos
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.add_symbol(_a )
SCREAMING_SNAKE_CASE__ : Tuple = self.add_symbol(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.add_symbol(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.add_symbol(_a )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = len(self.symbols )
def __eq__( self , _a ) -> Union[str, Any]:
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self , _a ) -> List[str]:
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ) -> str:
"""simple docstring"""
return len(self.symbols )
def __contains__( self , _a ) -> Optional[Any]:
"""simple docstring"""
return sym in self.indices
@classmethod
def _a ( cls , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cls()
d.add_from_file(_a )
return d
def _a ( self , _a , _a=1 , _a=False ) -> Optional[Any]:
"""simple docstring"""
if word in self.indices and not overwrite:
SCREAMING_SNAKE_CASE__ : str = self.indices[word]
SCREAMING_SNAKE_CASE__ : Any = self.count[idx] + n
return idx
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(self.symbols )
SCREAMING_SNAKE_CASE__ : List[str] = idx
self.symbols.append(_a )
self.count.append(_a )
return idx
def _a ( self , _a ) -> Tuple:
"""simple docstring"""
return 0
def _a ( self , _a ) -> Tuple:
"""simple docstring"""
if isinstance(_a , _a ):
try:
with open(_a , """r""" , encoding="""utf-8""" ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(_a ) )
return
SCREAMING_SNAKE_CASE__ : Any = f.readlines()
SCREAMING_SNAKE_CASE__ : Dict = self._load_meta(_a )
for line in lines[indices_start_line:]:
try:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = line.rstrip().rsplit(""" """ , 1 )
if field == "#fairseq:overwrite":
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = line.rsplit(""" """ , 1 )
else:
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : str = int(_a )
SCREAMING_SNAKE_CASE__ : List[str] = line
if word in self and not overwrite:
raise RuntimeError(
"""Duplicate word found when loading Dictionary: '{}'. """
"""Duplicate words can overwrite earlier ones by adding the """
"""#fairseq:overwrite flag at the end of the corresponding row """
"""in the dictionary file. If using the Camembert model, please """
"""download an updated copy of the model file.""".format(_a ) )
self.add_symbol(_a , n=_a , overwrite=_a )
except ValueError:
raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""" )
def _lowercase ( __lowerCAmelCase ) -> Dict:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
SCREAMING_SNAKE_CASE__ : int = dict((re.sub(r"""@@$""" , """""" , __lowerCAmelCase ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , __lowerCAmelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
SCREAMING_SNAKE_CASE__ : str = d[k] # restore
return da
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
# prep
if not os.path.exists(__lowerCAmelCase ):
raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(__lowerCAmelCase , """checkpoint.pt""" )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' )
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(__lowerCAmelCase , map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : Dict = chkpt["""cfg"""]["""model"""]
# dicts
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , """dict.txt""" )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F'''path to the file {dict_file} does not exist!''' )
SCREAMING_SNAKE_CASE__ : int = Dictionary.load(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE__ : Tuple = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES["""vocab_file"""] )
print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(__lowerCAmelCase , """bpecodes""" )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES["""merges_file"""] )
shutil.copyfile(__lowerCAmelCase , __lowerCAmelCase )
# model config
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , """config.json""" )
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1E-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(F'''Generating {biogpt_model_config_file}''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(F'''Generating {biogpt_tokenizer_config_file}''' )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# model
SCREAMING_SNAKE_CASE__ : Union[str, Any] = chkpt["""model"""]
# remove unneeded keys
SCREAMING_SNAKE_CASE__ : Tuple = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
SCREAMING_SNAKE_CASE__ : Tuple = model_state_dict.pop(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : List[str] = model_state_dict.pop(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = BioGptConfig.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = BioGptForCausalLM(__lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCAmelCase )
# save
SCREAMING_SNAKE_CASE__ : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
a :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a :Dict = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 12 |
"""simple docstring"""
from math import sqrt
def _lowercase ( __lowerCAmelCase ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowercase ( __lowerCAmelCase = 1_0001 ) -> int:
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f'{solution() = }')
| 12 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class __a :
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = MBartConfig
_SCREAMING_SNAKE_CASE :int = {}
_SCREAMING_SNAKE_CASE :int = """gelu"""
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=False , _a=99 , _a=32 , _a=2 , _a=4 , _a=37 , _a=0.1 , _a=0.1 , _a=20 , _a=2 , _a=1 , _a=0 , ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : Any = seq_length
SCREAMING_SNAKE_CASE__ : int = is_training
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = eos_token_id
SCREAMING_SNAKE_CASE__ : int = pad_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ : str = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE__ : int = prepare_mbart_inputs_dict(_a , _a , _a )
return config, inputs_dict
def _a ( self , _a , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = TFMBartModel(config=_a ).get_decoder()
SCREAMING_SNAKE_CASE__ : Tuple = inputs_dict["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Any = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : int = inputs_dict["""attention_mask"""][:1, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict["""head_mask"""]
SCREAMING_SNAKE_CASE__ : Any = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a , attention_mask=_a , head_mask=_a , use_cache=_a )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple()
SCREAMING_SNAKE_CASE__ : str = past_key_values[1]
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , ) -> Any:
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(tf.math.not_equal(__lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE :List[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE :Optional[int] = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE :str = True
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :Optional[Any] = False
def _a ( self , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFMBartModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict = ConfigTester(self , config_class=_a )
def _a ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
@require_sentencepiece
@require_tokenizers
@require_tf
class __a (unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :int = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
_SCREAMING_SNAKE_CASE :str = [
"""ลeful ONU declarฤ cฤ nu existฤ o soluลฃie militarฤ รฎn Siria""",
]
_SCREAMING_SNAKE_CASE :Optional[int] = """facebook/mbart-large-en-ro"""
@cached_property
def _a ( self ) -> str:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _a ( self , **_a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.translate_src_text(**_a )
self.assertListEqual(self.expected_text , _a )
def _a ( self , **_a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer(self.src_text , **_a , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(_a , skip_special_tokens=_a )
return generated_words
@slow
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 12 |
"""simple docstring"""
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = name
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
SCREAMING_SNAKE_CASE__ : List[Any] = weight
def __repr__( self ) -> List[Any]:
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def _a ( self ) -> Dict:
"""simple docstring"""
return self.value
def _a ( self ) -> int:
"""simple docstring"""
return self.name
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return self.weight
def _a ( self ) -> Dict:
"""simple docstring"""
return self.value / self.weight
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
SCREAMING_SNAKE_CASE__ : Any = []
for i in range(len(__lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sorted(__lowerCAmelCase , key=__lowerCAmelCase , reverse=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0, 0.0
for i in range(len(__lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def _lowercase ( ) -> List[str]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> str:
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
SCREAMING_SNAKE_CASE__ : Tuple = """"""
while len(__lowerCAmelCase ) % 3 != 0:
SCREAMING_SNAKE_CASE__ : str = """0""" + bin_string
SCREAMING_SNAKE_CASE__ : List[str] = [
bin_string[index : index + 3]
for index in range(len(__lowerCAmelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
for index, val in enumerate(__lowerCAmelCase ):
oct_val += int(2 ** (2 - index) * int(__lowerCAmelCase ) )
oct_string += str(__lowerCAmelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 12 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
a :Optional[int] = None
a :Optional[Any] = logging.get_logger(__name__)
a :Optional[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
a :Union[str, Any] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
a :Any = {
"facebook/nllb-large-en-ro": 1_024,
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
a :Tuple = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE :str = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE :int = ["""input_ids""", """attention_mask"""]
_SCREAMING_SNAKE_CASE :Tuple = NllbTokenizer
_SCREAMING_SNAKE_CASE :List[int] = []
_SCREAMING_SNAKE_CASE :List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , _a=False , **_a , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
SCREAMING_SNAKE_CASE__ : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , legacy_behaviour=_a , **_a , )
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE__ : str = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
SCREAMING_SNAKE_CASE__ : List[str] = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE__ : Dict = src_lang if src_lang is not None else """eng_Latn"""
SCREAMING_SNAKE_CASE__ : List[str] = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE__ : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _a ( self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _a ( self , _a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self , _a , _a = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self , _a , _a , _a , _a , **_a ) -> Tuple:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
SCREAMING_SNAKE_CASE__ : Dict = src_lang
SCREAMING_SNAKE_CASE__ : Dict = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.convert_tokens_to_ids(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = tgt_lang_id
return inputs
def _a ( self , _a , _a = "eng_Latn" , _a = None , _a = "fra_Latn" , **_a , ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = src_lang
SCREAMING_SNAKE_CASE__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> str:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , _a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Dict = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : int = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _a ( self , _a ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : Any = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _a ( self , _a , _a = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 12 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
a :Optional[int] = list[tuple[int, int]]
a :Any = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a :Optional[int] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = pos_x
SCREAMING_SNAKE_CASE__ : Tuple = pos_y
SCREAMING_SNAKE_CASE__ : Optional[int] = (pos_y, pos_x)
SCREAMING_SNAKE_CASE__ : Optional[int] = goal_x
SCREAMING_SNAKE_CASE__ : List[Any] = goal_y
SCREAMING_SNAKE_CASE__ : Dict = parent
class __a :
'''simple docstring'''
def __init__( self , _a , _a ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , _a )
SCREAMING_SNAKE_CASE__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , _a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.start]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
def _a ( self ) -> Path | None:
"""simple docstring"""
while self.node_queue:
SCREAMING_SNAKE_CASE__ : str = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
SCREAMING_SNAKE_CASE__ : Tuple = True
return self.retrace_path(_a )
SCREAMING_SNAKE_CASE__ : Dict = self.get_successors(_a )
for node in successors:
self.node_queue.append(_a )
if not self.reached:
return [self.start.pos]
return None
def _a ( self , _a ) -> list[Node]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for action in delta:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_a , _a , self.target.pos_y , self.target.pos_x , _a ) )
return successors
def _a ( self , _a ) -> Path:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = node
SCREAMING_SNAKE_CASE__ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE__ : Optional[int] = current_node.parent
path.reverse()
return path
class __a :
'''simple docstring'''
def __init__( self , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BreadthFirstSearch(_a , _a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BreadthFirstSearch(_a , _a )
SCREAMING_SNAKE_CASE__ : List[str] = False
def _a ( self ) -> Path | None:
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
SCREAMING_SNAKE_CASE__ : str = self.fwd_bfs.node_queue.pop(0 )
SCREAMING_SNAKE_CASE__ : str = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
SCREAMING_SNAKE_CASE__ : List[str] = True
return self.retrace_bidirectional_path(
_a , _a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = current_bwd_node
SCREAMING_SNAKE_CASE__ : Tuple = current_fwd_node
SCREAMING_SNAKE_CASE__ : List[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(_a ),
self.bwd_bfs: self.bwd_bfs.get_successors(_a ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_a )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _a ( self , _a , _a ) -> Path:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.fwd_bfs.retrace_path(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.bwd_bfs.retrace_path(_a )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE__ : Optional[int] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a :Union[str, Any] = (0, 0)
a :Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a :Dict = time.time()
a :Any = BreadthFirstSearch(init, goal)
a :str = bfs.search()
a :Union[str, Any] = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
a :Union[str, Any] = time.time()
a :int = BidirectionalBreadthFirstSearch(init, goal)
a :int = bd_bfs.search()
a :int = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 12 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Any:
# Format the message.
if name is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
else:
SCREAMING_SNAKE_CASE__ : str = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
SCREAMING_SNAKE_CASE__ : Dict = fmt.format(__lowerCAmelCase )
# Print and recurse (if needed).
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
if msg is not None:
print(__lowerCAmelCase )
for k in val.keys():
recursive_print(__lowerCAmelCase , val[k] , spaces + 2 )
elif isinstance(__lowerCAmelCase , torch.Tensor ):
print(__lowerCAmelCase , """:""" , val.size() )
else:
print(__lowerCAmelCase , """:""" , __lowerCAmelCase )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
SCREAMING_SNAKE_CASE__ : Tuple = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
SCREAMING_SNAKE_CASE__ : int = (num_heads, hidden_size, num_splits) + input_shape[1:]
SCREAMING_SNAKE_CASE__ : List[str] = param.view(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = param.transpose(0 , 2 )
SCREAMING_SNAKE_CASE__ : List[Any] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
SCREAMING_SNAKE_CASE__ : List[str] = (num_heads, num_splits, hidden_size) + input_shape[1:]
SCREAMING_SNAKE_CASE__ : Dict = param.view(*__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = param.transpose(0 , 1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Any = param.view(*__lowerCAmelCase )
return param
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
# The converted output model.
SCREAMING_SNAKE_CASE__ : List[str] = {}
# old versions did not store training args
SCREAMING_SNAKE_CASE__ : List[str] = input_state_dict.get("""args""" , __lowerCAmelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.padded_vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = ds_args.max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = ds_args.num_layers
SCREAMING_SNAKE_CASE__ : Dict = ds_args.num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
SCREAMING_SNAKE_CASE__ : List[str] = config.n_head
# The hidden_size per head.
SCREAMING_SNAKE_CASE__ : str = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_state_dict["""checkpoint_version"""]
else:
SCREAMING_SNAKE_CASE__ : Tuple = 0.0
# The model.
SCREAMING_SNAKE_CASE__ : Any = input_state_dict["""model"""]
# The language model.
SCREAMING_SNAKE_CASE__ : Any = model["""language_model"""]
# The embeddings.
SCREAMING_SNAKE_CASE__ : str = lm["""embedding"""]
# The word embeddings.
SCREAMING_SNAKE_CASE__ : int = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
SCREAMING_SNAKE_CASE__ : Any = word_embeddings[: config.vocab_size, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = word_embeddings
# The position embeddings.
SCREAMING_SNAKE_CASE__ : Any = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
SCREAMING_SNAKE_CASE__ : Tuple = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' )
# Store the position embeddings.
SCREAMING_SNAKE_CASE__ : List[Any] = pos_embeddings
# The transformer.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
SCREAMING_SNAKE_CASE__ : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
SCREAMING_SNAKE_CASE__ : str = layer_re.match(__lowerCAmelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
SCREAMING_SNAKE_CASE__ : Dict = int(m.group(1 ) )
# The name of the operation.
SCREAMING_SNAKE_CASE__ : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
SCREAMING_SNAKE_CASE__ : str = m.group(3 )
# The name of the layer.
SCREAMING_SNAKE_CASE__ : List[Any] = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
SCREAMING_SNAKE_CASE__ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
SCREAMING_SNAKE_CASE__ : List[Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
SCREAMING_SNAKE_CASE__ : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = causal_mask
# Insert a "dummy" tensor for masked_bias.
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(-1E4 , dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : List[str] = masked_bias
SCREAMING_SNAKE_CASE__ : List[str] = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
SCREAMING_SNAKE_CASE__ : str = out_val.transpose(0 , 1 ).contiguous()
# Store.
SCREAMING_SNAKE_CASE__ : Dict = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
SCREAMING_SNAKE_CASE__ : Any = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase )
# Store. No change of shape.
SCREAMING_SNAKE_CASE__ : str = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
SCREAMING_SNAKE_CASE__ : str = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE__ : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
SCREAMING_SNAKE_CASE__ : int = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE__ : Dict = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = transformer["""final_layernorm.weight"""]
SCREAMING_SNAKE_CASE__ : str = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
SCREAMING_SNAKE_CASE__ : Tuple = word_embeddings
# It should be done!
return output_state_dict
def _lowercase ( ) -> List[Any]:
# Create the argument parser.
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=__lowerCAmelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=__lowerCAmelCase , help="""An optional config json file describing the pre-trained model.""" , )
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
# Extract the basename.
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(__lowerCAmelCase , map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : str = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : int = input_state_dict.get("""args""" , __lowerCAmelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
SCREAMING_SNAKE_CASE__ : Dict = """gelu_fast"""
elif ds_args.openai_gelu:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu_new"""
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
SCREAMING_SNAKE_CASE__ : Any = """gelu_new"""
# Spell out all parameters in case the defaults change.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = GPTaConfig(
vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__lowerCAmelCase , summary_activation=__lowerCAmelCase , summary_proj_to_labels=__lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=__lowerCAmelCase , use_cache=__lowerCAmelCase , bos_token_id=5_0256 , eos_token_id=5_0256 , )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = GPTaConfig.from_json_file(args.config_file )
SCREAMING_SNAKE_CASE__ : Tuple = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_megatron_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(__lowerCAmelCase , __lowerCAmelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
SCREAMING_SNAKE_CASE__ : Tuple = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
SCREAMING_SNAKE_CASE__ : Any = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
SCREAMING_SNAKE_CASE__ : Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """gpt2"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = type(__lowerCAmelCase ).__name__
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(__lowerCAmelCase )
# Save tokenizer based on args
print(F'''Adding {tokenizer_class} tokenizer files''' )
tokenizer.save_pretrained(__lowerCAmelCase )
# Store the state_dict to file.
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , """pytorch_model.bin""" )
print(F'''Saving checkpoint to "{output_checkpoint_file}"''' )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 12 | 1 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a :Optional[int] = logging.getLogger(__name__)
class __a (UpperCamelCase_):
'''simple docstring'''
def _a ( self , _a , _a , _a=None , _a=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.layer[current_layer](_a , _a , head_mask[current_layer] )
SCREAMING_SNAKE_CASE__ : List[str] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , UpperCamelCase_ , )
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a ) -> Dict:
"""simple docstring"""
super().__init__(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BertEncoderWithPabee(_a )
self.init_weights()
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : str = 0
def _a ( self , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = threshold
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = patience
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.inference_layers_num / self.inference_instances_num
SCREAMING_SNAKE_CASE__ : Optional[int] = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def _a ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=False , ) -> List[str]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
SCREAMING_SNAKE_CASE__ : Any = input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
SCREAMING_SNAKE_CASE__ : List[str] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.ones(_a , device=_a )
if token_type_ids is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE__ : torch.Tensor = self.get_extended_attention_mask(_a , _a , _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = encoder_hidden_states.size()
SCREAMING_SNAKE_CASE__ : int = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.ones(_a , device=_a )
SCREAMING_SNAKE_CASE__ : int = self.invert_attention_mask(_a )
else:
SCREAMING_SNAKE_CASE__ : int = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_head_mask(_a , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE__ : List[str] = self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = embedding_output
if self.training:
SCREAMING_SNAKE_CASE__ : Any = []
for i in range(self.config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : List[str] = self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.pooler(_a )
SCREAMING_SNAKE_CASE__ : Dict = output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
SCREAMING_SNAKE_CASE__ : str = self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooler(encoder_outputs[0] )
SCREAMING_SNAKE_CASE__ : str = [output_layers[self.config.num_hidden_layers - 1](_a )]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : str = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
SCREAMING_SNAKE_CASE__ : Dict = self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.pooler(_a )
SCREAMING_SNAKE_CASE__ : Tuple = output_layers[i](_a )
if regression:
SCREAMING_SNAKE_CASE__ : Tuple = logits.detach()
if patient_result is not None:
SCREAMING_SNAKE_CASE__ : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
SCREAMING_SNAKE_CASE__ : List[str] = 0
else:
SCREAMING_SNAKE_CASE__ : List[Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : Optional[int] = logits
if patient_counter == self.patience:
break
SCREAMING_SNAKE_CASE__ : int = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , UpperCamelCase_ , )
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
SCREAMING_SNAKE_CASE__ : str = config.num_labels
SCREAMING_SNAKE_CASE__ : List[str] = BertModelWithPabee(_a )
SCREAMING_SNAKE_CASE__ : Any = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def _a ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
SCREAMING_SNAKE_CASE__ : List[str] = (logits[-1],)
if labels is not None:
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Any = 0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MSELoss()
SCREAMING_SNAKE_CASE__ : Dict = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE__ : Dict = CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
SCREAMING_SNAKE_CASE__ : Any = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
SCREAMING_SNAKE_CASE__ : List[Any] = (total_loss / total_weights,) + outputs
return outputs
| 12 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __a (UpperCamelCase_):
'''simple docstring'''
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
with open(_a , encoding="""utf-8""" ) as input_file:
SCREAMING_SNAKE_CASE__ : str = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_file.read()
SCREAMING_SNAKE_CASE__ : str = regexp.search(_a )
return match
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
with open(_a , encoding="""utf-8""" ) as input_file:
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
SCREAMING_SNAKE_CASE__ : List[Any] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
SCREAMING_SNAKE_CASE__ : Dict = regexp.finditer(_a )
SCREAMING_SNAKE_CASE__ : int = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = Path("""./datasets""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_a ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path("""./datasets""" )
SCREAMING_SNAKE_CASE__ : List[str] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(_a ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 12 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 12 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __a :
'''simple docstring'''
def __init__( self , _a , _a=99 , _a=13 , _a=7 , _a=9 , _a=True , _a=True , _a=False , _a=32 , _a=5 , _a=4 , _a=37 , _a=8 , _a=0.1 , _a=0.002 , _a=1 , _a=0 , _a=0 , _a=None , _a=None , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = encoder_seq_length
SCREAMING_SNAKE_CASE__ : str = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[int] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Tuple = is_training
SCREAMING_SNAKE_CASE__ : Dict = use_attention_mask
SCREAMING_SNAKE_CASE__ : List[str] = use_labels
SCREAMING_SNAKE_CASE__ : str = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Any = d_ff
SCREAMING_SNAKE_CASE__ : Any = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate
SCREAMING_SNAKE_CASE__ : List[str] = initializer_factor
SCREAMING_SNAKE_CASE__ : List[Any] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = pad_token_id
SCREAMING_SNAKE_CASE__ : Any = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : str = decoder_layers
def _a ( self ) -> Tuple:
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""" )
def _a ( self , _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> Any:
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : int = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_a )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_a )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=_a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE__ : Tuple = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
SCREAMING_SNAKE_CASE__ : List[str] = config.num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_inputs_dict(_a , _a , _a )
return config, input_dict
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
return config, inputs_dict
def _a ( self ) -> List[str]:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _a ( self ) -> List[Any]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _a ( self , _a , _a , _a , _a , _a , _a , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = UMTaModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(
input_ids=_a , decoder_input_ids=_a , attention_mask=_a , decoder_attention_mask=_a , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(input_ids=_a , decoder_input_ids=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = result.last_hidden_state
SCREAMING_SNAKE_CASE__ : Dict = result.past_key_values
SCREAMING_SNAKE_CASE__ : Any = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_a ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _a ( self , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = UMTaModel(config=_a ).get_decoder().to(_a ).eval()
# first forward pass
SCREAMING_SNAKE_CASE__ : str = model(_a , use_cache=_a )
SCREAMING_SNAKE_CASE__ : str = model(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , use_cache=_a )
self.parent.assertTrue(len(_a ) == len(_a ) )
self.parent.assertTrue(len(_a ) == len(_a ) + 1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE__ : Tuple = model(_a , past_key_values=_a )["""last_hidden_state"""]
# select random slice
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
def _a ( self , _a , _a , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = UMTaModel(config=_a ).to(_a ).half().eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_a )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(_a ).any().item() )
@require_torch
class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE :Optional[int] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE :List[str] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE :Union[str, Any] = True
_SCREAMING_SNAKE_CASE :Tuple = False
_SCREAMING_SNAKE_CASE :Optional[Any] = False
_SCREAMING_SNAKE_CASE :List[Any] = True
_SCREAMING_SNAKE_CASE :List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_SCREAMING_SNAKE_CASE :Union[str, Any] = [0.8, 0.9]
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Dict = UMTaModel(config_and_inputs[0] ).to(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=_a , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_a )
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = config_and_inputs[0]
SCREAMING_SNAKE_CASE__ : Tuple = UMTaForConditionalGeneration(_a ).eval()
model.to(_a )
SCREAMING_SNAKE_CASE__ : List[str] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_a ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ),
}
for attn_name, (name, mask) in zip(_a , head_masking.items() ):
SCREAMING_SNAKE_CASE__ : List[str] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
SCREAMING_SNAKE_CASE__ : str = torch.ones(
config.num_decoder_layers , config.num_heads , device=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_a , return_dict_in_generate=_a , **_a , )
# We check the state of decoder_attentions and cross_attentions just from the last step
SCREAMING_SNAKE_CASE__ : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _a ( self ) -> Dict:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __a (unittest.TestCase):
'''simple docstring'''
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_a ).to(_a )
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_a , legacy=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_a , return_tensors="""pt""" , padding=_a ).input_ids
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(_a , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids.to(_a ) )
SCREAMING_SNAKE_CASE__ : int = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(_a )
self.assertEqual(_a , _a )
| 12 | 1 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = len(__lowerCAmelCase )
for i in range(length - 1 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = i
for k in range(i + 1 , __lowerCAmelCase ):
if collection[k] < collection[least]:
SCREAMING_SNAKE_CASE__ : Optional[int] = k
if least != i:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = (collection[i], collection[least])
return collection
if __name__ == "__main__":
a :List[str] = input("Enter numbers separated by a comma:\n").strip()
a :List[Any] = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 12 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a )
SCREAMING_SNAKE_CASE__ : List[Any] = Sql(
cache_dir=_a , features=_a , sql=_a , con=_a , **_a , )
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , )
# Build dataset for splits
SCREAMING_SNAKE_CASE__ : str = self.builder.as_dataset(
split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Any:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
SCREAMING_SNAKE_CASE__ : int = dataset
SCREAMING_SNAKE_CASE__ : Any = name
SCREAMING_SNAKE_CASE__ : Optional[Any] = con
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : int = num_proc
SCREAMING_SNAKE_CASE__ : int = to_sql_kwargs
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.to_sql_kwargs.pop("""sql""" , _a )
SCREAMING_SNAKE_CASE__ : Tuple = self.to_sql_kwargs.pop("""con""" , _a )
SCREAMING_SNAKE_CASE__ : Tuple = self.to_sql_kwargs.pop("""index""" , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(index=_a , **self.to_sql_kwargs )
return written
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = args
SCREAMING_SNAKE_CASE__ : List[str] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
SCREAMING_SNAKE_CASE__ : Any = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ : Optional[int] = batch.to_pandas()
SCREAMING_SNAKE_CASE__ : List[Any] = df.to_sql(self.name , self.con , index=_a , **_a )
return num_rows or len(_a )
def _a ( self , _a , **_a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
| 12 | 1 |
"""simple docstring"""
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a :str = get_logger(__name__)
class __a (enum.Enum):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = """all_checks"""
_SCREAMING_SNAKE_CASE :Union[str, Any] = """basic_checks"""
_SCREAMING_SNAKE_CASE :List[str] = """no_checks"""
class __a (UpperCamelCase_):
'''simple docstring'''
class __a (UpperCamelCase_):
'''simple docstring'''
class __a (UpperCamelCase_):
'''simple docstring'''
class __a (UpperCamelCase_):
'''simple docstring'''
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> Optional[int]:
if expected_checksums is None:
logger.info("""Unable to verify checksums.""" )
return
if len(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) )
if len(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
SCREAMING_SNAKE_CASE__ : str = """ for """ + verification_name if verification_name is not None else """"""
if len(__lowerCAmelCase ) > 0:
raise NonMatchingChecksumError(
F'''Checksums didn\'t match{for_verification_name}:\n'''
F'''{bad_urls}\n'''
"""Set `verification_mode='no_checks'` to skip checksums verification and ignore this error""" )
logger.info("""All the checksums matched successfully""" + for_verification_name )
class __a (UpperCamelCase_):
'''simple docstring'''
class __a (UpperCamelCase_):
'''simple docstring'''
class __a (UpperCamelCase_):
'''simple docstring'''
class __a (UpperCamelCase_):
'''simple docstring'''
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
if expected_splits is None:
logger.info("""Unable to verify splits sizes.""" )
return
if len(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) )
if len(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) > 0:
raise UnexpectedSplits(str(set(__lowerCAmelCase ) - set(__lowerCAmelCase ) ) )
SCREAMING_SNAKE_CASE__ : str = [
{"""expected""": expected_splits[name], """recorded""": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(__lowerCAmelCase ) > 0:
raise NonMatchingSplitsSizesError(str(__lowerCAmelCase ) )
logger.info("""All the splits matched successfully.""" )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase = True ) -> dict:
if record_checksum:
SCREAMING_SNAKE_CASE__ : Optional[int] = shaaaa()
with open(__lowerCAmelCase , """rb""" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"""""" ):
m.update(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = m.hexdigest()
else:
SCREAMING_SNAKE_CASE__ : Any = None
return {"num_bytes": os.path.getsize(__lowerCAmelCase ), "checksum": checksum}
def _lowercase ( __lowerCAmelCase ) -> List[str]:
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 12 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
SCREAMING_SNAKE_CASE__ : List[Any] = 1
SCREAMING_SNAKE_CASE__ : int = 1
while repunit:
SCREAMING_SNAKE_CASE__ : str = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _lowercase ( __lowerCAmelCase = 100_0000 ) -> int:
SCREAMING_SNAKE_CASE__ : Dict = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(__lowerCAmelCase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'{solution() = }')
| 12 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.