"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        dict_inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        list_inputs = [input_ids, input_mask]
        result = model(dict_inputs)
        result = model(list_inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
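# To run just this test file (assuming a transformers dev checkout; the path is
# the usual location of this module, not stated in the dump itself):
#
#     python -m pytest tests/models/convbert/test_modeling_tf_convbert.py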
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
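# A minimal usage sketch of the configuration above (the language codes are
# illustrative; only `("en_XX",)` is the documented default):
#
#     from transformers import XmodConfig
#
#     config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#     assert config.model_type == "xmod"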
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
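# A short usage sketch (the dataset name is illustrative; the column names mirror
# the defaults above):
#
#     from datasets import load_dataset
#
#     squad = load_dataset("squad", split="train")
#     squad = squad.prepare_for_task(QuestionAnsweringExtractive())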
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__UpperCAmelCase = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
__UpperCAmelCase = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
__UpperCAmelCase = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)

            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
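# The pattern exercised by test_jit_compilation above, in isolation (a sketch,
# assuming `model` and `pixel_values` are already built as in the test):
#
#     fast_forward = jax.jit(lambda pixel_values: model(pixel_values=pixel_values))
#     jitted_out = fast_forward(pixel_values)
#     with jax.disable_jit():
#         eager_out = fast_forward(pixel_values)  # same shapes, no compilation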
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
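# `cosine_distance` L2-normalizes each row of both inputs, so the matmul yields an
# (n_images, n_concepts) matrix of cosine similarities in [-1, 1]; illustrative shapes:
#
#     cosine_distance(torch.randn(2, 768), torch.randn(17, 768)).shape  # torch.Size([2, 17])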
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
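# Loading sketch (the checkpoint id is the one commonly paired with this class;
# treat it as an assumption rather than part of this module):
#
#     safety_checker = StableDiffusionSafetyChecker.from_pretrained(
#         "CompVis/stable-diffusion-safety-checker"
#     )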
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
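# Example invocation (the output file names are illustrative; only the --src_path
# default comes from the script itself):
#
#     python parse_dpr_relevance_data.py \
#         --src_path biencoder-nq-dev.json \
#         --evaluation_set nq.questions \
#         --gold_data_path nq.gold_titles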
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_blocked(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
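# The AutoBackbone round-trip exercised above, in isolation (checkpoints as in
# test_timm_transformer_backbone_equivalence):
#
#     from transformers import AutoBackbone
#
#     backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])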
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Convert a version tuple such as (1, 2, 0) to the string "1.2.0"."""
    return ".".join(str(v) for v in version_tuple)
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 330 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def prepare_config_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @slow
    def test_pretrained_model_vs_model_from_saved(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DeiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 21 |
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array

    return answer
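

# Illustrative example (added; not in the original script): with the default
# Indian denominations, the greedy strategy decomposes 987 as
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]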
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 112 | 0 |
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary string to its octal representation.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010")
    '52'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 52 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 52 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 81 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid,
    source,
    destination,
    allow_diagonal,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
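

# Worked example (added; not in the original module): on the grid below, where
# 1 marks a passable cell, the shortest path from (0, 0) to (2, 2) without
# diagonal moves has length 4:
#   grid = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]])
#   dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#   -> (4.0, [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)])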
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257 | 0 |
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden layer nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        # Gradient of the loss with respect to each weight matrix, obtained via
        # the chain rule through the sigmoid activations.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 118 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 | 1 |
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
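

# Worked example (added; not in the original file): below 10 the multiples of
# 3 or 5 are 3, 5, 6 and 9, so solution(10) returns 23.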
if __name__ == "__main__":
    print(f"{solution() = }")
| 103 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 103 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids,
        prefix_embeds,
        attention_mask=None,
        labels=None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 300 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the image processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 300 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: linear regression on (intercept, date, match count) features."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: seasonal ARIMA with the match count as exogenous input."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regression with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 305 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
if not conversation_id:
__lowerCamelCase = uuid.uuida()
if past_user_inputs is None:
__lowerCamelCase = []
if generated_responses is None:
__lowerCamelCase = []
__lowerCamelCase = conversation_id
__lowerCamelCase = past_user_inputs
__lowerCamelCase = generated_responses
__lowerCamelCase = text
def __eq__( self , __UpperCAmelCase ):
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
F"""with: \"{text}\".""" )
__lowerCamelCase = text
else:
logger.warning(
F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
__lowerCamelCase = text
    def mark_processed( self ):
        '''simple docstring'''
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response( self , response ):
        '''simple docstring'''
        self.generated_responses.append(response )
    def iter_texts( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
output += F"""{name} >> {text} \n"""
return output
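# A minimal sketch of driving the Conversation container directly; the texts
# are illustrative only.
# conversation = Conversation("Going to the movies tonight - any suggestions?")
# conversation.mark_processed()                     # move the pending input into history
# conversation.append_response("The Big Lebowski")  # record a bot reply
# print(conversation)                               # "Conversation id: ... \n user >> ... \n bot >> ... \n"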
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class ConversationalPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations , num_workers=0 , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation , min_length_for_response=32 ):
        '''simple docstring'''
        if not isinstance(conversation , Conversation ):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method" )
        if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        '''simple docstring'''
        max_length = generate_kwargs.get("max_length" , self.model.config.max_length )
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation" )
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self , conversation ):
        '''simple docstring'''
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
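# Hypothetical end-to-end use of the pipeline defined above; the model name and
# reply are illustrative, not guaranteed outputs.
# from transformers import pipeline
# chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
# conversation = Conversation("Going to the movies tonight - any suggestions?")
# conversation = chatbot(conversation)
# print(conversation.generated_responses[-1])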
| 330 | 0 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    sample: torch.FloatTensor
class TransformerTemporalModel( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__(self, num_attention_heads = 16, attention_head_dim = 88, in_channels = None, out_channels = None, num_layers = 1, dropout = 0.0, norm_num_groups = 32, cross_attention_dim = None, attention_bias = False, sample_size = None, activation_fn = "geglu", norm_elementwise_affine = True, double_self_attention = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True )
        self.proj_in = nn.Linear(in_channels, inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim, in_channels )
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict = True, ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width )
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4 )
        hidden_states = self.norm(hidden_states )
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1 ).reshape(batch_size * height * width, num_frames, channel )
        hidden_states = self.proj_in(hidden_states )
# 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames )
            .permute(0, 3, 4, 1, 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output )
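# Minimal smoke test of the temporal transformer above; shapes are illustrative.
# The model expects (batch * num_frames, channels, height, width) and, thanks to
# the residual connection, returns a sample of the same shape.
# model = TransformerTemporalModel(num_attention_heads=4, attention_head_dim=8, in_channels=32, num_layers=1)
# x = torch.randn(2 * 3, 32, 8, 8)  # batch_size=2, num_frames=3
# out = model(x, num_frames=3).sample
# assert out.shape == x.shape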
| 364 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 82 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig( PretrainedConfig ):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
                f"""`config.num_layers = {self.num_layers}`. """
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument." )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types ):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
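# Quick illustration of the expansion above: each (pattern, repeat) pair is
# flattened into one attention type per layer. With the GPT-Neo default
# [[["global", "local"], 12]] this yields 24 alternating entries:
# >>> GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
# ['global', 'local', 'global', 'local', ...]  # len == 24, one entry per layer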
def custom_unfold( input , dimension , size , step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode="floor" ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    """Custom, ONNX-exportable way to find the largest divisor of seq_length below window_size."""
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode="floor" )
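# Sanity check (illustrative) that the ONNX-exportable unfold above matches
# torch.Tensor.unfold on a small 1-D tensor:
# import torch
# t = torch.arange(10).float()
# assert torch.equal(custom_unfold(t, 0, 3, 2), t.unfold(0, 3, 2))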
class GPTNeoOnnxConfig( OnnxConfigWithPast ):
    @property
    def inputs( self ):
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ):
'''simple docstring'''
return 13
| 52 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'convnextv2'
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
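# Illustrative instantiation of the config above, showing the derived defaults:
# config = ConvNextV2Config()
# config.hidden_sizes  # [96, 192, 384, 768]
# config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']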
| 52 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
    def _download_and_prepare( self , dl_manager ):
        import nltk
        nltk.download('''wordnet''' )
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            nltk.download('''punkt''' )
        if NLTK_VERSION >= version.Version('''3.6.6''' ):
            nltk.download('''omw-1.4''' )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        if NLTK_VERSION >= version.Version('''3.6.5''' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 366 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfArgumentParserTest(unittest.TestCase ):
'''simple docstring'''
    def test_set_level( self ):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )
    def test_integration( self ):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        msg = '''Testing 1, 2, 3'''
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + '''\n''' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , '''''' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + '''\n''' )
        # restore to the original level
        logging.set_verbosity(level_origin )
    @mockenv(TRANSFORMERS_VERBOSITY='''error''' )
    def test_env_override( self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''' , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
        # restore to the original level
        os.environ['''TRANSFORMERS_VERBOSITY'''] = ''
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
    def test_env_invalid_override( self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
            # this action activates the env var
            logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
    def test_advisory_warnings( self ):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        msg = '''Testing 1, 2, 3'''
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , '''''' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + '''\n''' )
def test_set_progress_bar_enabled() -> None:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 252 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("tiny" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small" ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base" ):
        pass
    elif deit_name[4:].startswith("large" ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
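    # Example invocation (illustrative; the script filename is assumed, and the
    # timm checkpoint is downloaded on first use):
    # python convert_deit_timm_to_pytorch.py \
    #     --deit_name vit_deit_base_distilled_patch16_224 \
    #     --pytorch_dump_folder_path ./deit-base-distilled-patch16-224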
| 118 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
    """simple docstring"""
    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder ):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info( self ):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table: pa.Table ) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                type = self.config.features.arrow_schema.field(column_name ).type
                pa_table = pa_table.append_column(column_name , pa.array([None] * len(pa_table ) , type=type ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table
    def _generate_tables( self , files ):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    dataset = json.load(f )
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset , (list, tuple) ):
                    keys = set().union(*[row.keys() for row in dataset] )
                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping )
                yield file_idx, self._cast_table(pa_table )
            # If the file has one json object per line
            else:
                with open(file , "rb" ) as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32 , 16 << 10 )
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding , errors=encoding_errors ).encode("utf-8" )
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch ) , read_options=paj.ReadOptions(block_size=block_size ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e , pa.ArrowInvalid )
                                        and "straddling" not in str(e )
                                        or block_size > len(batch )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    dataset = json.load(f )
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset , list ):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset] )
                                    mapping = {col: [row.get(col ) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                                    raise ValueError(f"Not able to read records in the JSON file at {file}." ) from None
                                yield file_idx, self._cast_table(pa_table )
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table )
                        batch_idx += 1
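# Illustrative use of the builder above through the public datasets API; the
# file paths are hypothetical.
# from datasets import load_dataset
# ds = load_dataset("json", data_files="my_records.jsonl", split="train")
# ds_field = load_dataset("json", data_files="nested.json", field="data")  # uses JsonConfig.field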
| 118 | 1 |
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class WER(datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 343 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig( PretrainedConfig ):
    model_type = "audio-spectrogram-transformer"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1024 , num_mel_bins=128 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 343 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs( self , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
    @property
    def gpu_provider( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 300 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class YolosFeatureExtractor( YolosImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 300 | 1 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func( func , param_num , param ) -> str:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
    return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus ):
    """simple docstring"""
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed( self , stage , model ):
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )
    def do_checks( self , output_dir ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass
    def run_and_check( self , stage: str , model: str , eval_steps: int = 10 , distributed: bool = True , quality_checks: bool = True , fp16: bool = True , ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer( self , stage: str , model_name: str , eval_steps: int = 10 , num_train_epochs: int = 1 , distributed: bool = True , fp16: bool = True , ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx" , after=False )
        args = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(num_train_epochs )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
        if fp16:
            args.extend(["--fp16"] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed )
        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
        return output_dir
    def get_launcher( self , distributed=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
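# Illustrative launcher output (assuming a machine with 2 visible GPUs):
#   self.get_launcher(distributed=True)  -> ['deepspeed', '--num_nodes', '1', '--num_gpus', '2']
#   self.get_launcher(distributed=False) -> ['deepspeed', '--num_nodes', '1', '--num_gpus', '1']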
| 346 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 10_02 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
    def test_full_tokenizer( self ):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2_85, 46, 10, 1_70, 3_82] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer( self ):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [1_85_36, 22_60, 1_01]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model( self ):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
_a = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 346 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase_ = get_logger(__name__)
class lowerCamelCase__:
UpperCAmelCase__ : List[Any] = 'dummy_data'
UpperCAmelCase__ : str = 'datasets'
UpperCAmelCase__ : Tuple = False
def __init__( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Union[Version, str] , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[List[Callable]] = None , ):
__lowerCamelCase = 0
__lowerCamelCase = dataset_name
__lowerCamelCase = cache_dir
__lowerCamelCase = use_local_dummy_data
__lowerCamelCase = config
# download_callbacks take a single url as input
__lowerCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowerCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowerCamelCase = str(UpperCamelCase_ )
# to be downloaded
__lowerCamelCase = None
__lowerCamelCase = None
@property
def lowerCAmelCase__ ( self: List[Any] ):
if self._dummy_file is None:
__lowerCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def lowerCAmelCase__ ( self: str ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowerCamelCase = cached_path(
UpperCamelCase_ , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase_ , force_extract=UpperCamelCase_ )
return os.path.join(UpperCamelCase_ , self.dummy_file_name )
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCAmelCase__ ( self: Tuple ):
if self._bucket_url is None:
__lowerCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowerCAmelCase__ ( self: str ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict , *UpperCamelCase_: str ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowerCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowerCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return self.create_dummy_data_dict(UpperCamelCase_ , UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase_ , UpperCamelCase_ )
else:
return self.create_dummy_data_single(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , *UpperCamelCase_: str ):
return self.download_and_extract(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: str ):
return self.download_and_extract(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int , *UpperCamelCase_: List[str] , **UpperCamelCase_: str ):
return path
def lowerCAmelCase__ ( self: Dict ):
return {}
def lowerCAmelCase__ ( self: str , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
for single_url in single_urls:
download_callback(UpperCamelCase_ )
else:
__lowerCamelCase = single_urls
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) for x in single_urls]
else:
__lowerCamelCase = single_urls
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) )
__lowerCamelCase = value
# make sure that values are unique
if all(isinstance(UpperCamelCase_ , UpperCamelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowerCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowerCamelCase = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , UpperCamelCase_ ) ) for url in data_url )
__lowerCamelCase = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowerCamelCase = [data_url[0]] * len(UpperCamelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(UpperCamelCase_ )
return dummy_data_list
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] ):
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(UpperCamelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
def lowerCAmelCase__ ( self: List[Any] ):
pass
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict ):
def _iter_archive_members(UpperCamelCase_: Any ):
# this preserves the order of the members inside the ZIP archive
__lowerCamelCase = Path(self.dummy_file ).parent
__lowerCamelCase = path.relative_to(UpperCamelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowerCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
__lowerCamelCase = _iter_archive_members(UpperCamelCase_ ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(UpperCamelCase_ ).as_posix(), file_path.open("""rb""" )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase_ ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(UpperCamelCase_ , UpperCamelCase_ )
| 12 |
def factorial( num: int ) -> int:
    """Return num! (the factorial of num)."""
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact
def split_and_add( number: int ) -> int:
    """Split number into its decimal digits and return their sum."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution( num: int = 1_00 ) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
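# A minimal cross-check against the standard library (a sketch; 100! is known to
# have digit sum 648). The helper name `_cross_check` is not part of the original.
import math
def _cross_check(num: int = 1_00) -> bool:
    # Verify the hand-rolled factorial/digit-sum pipeline against math.factorial.
    return solution(num) == sum(int(digit) for digit in str(math.factorial(num)))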
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 82 | 0 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
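# The hypothesis is linear in the inputs: for an example x = (x1, x2, x3),
#   h(x) = parameter_vector[0] + parameter_vector[1]*x1
#          + parameter_vector[2]*x2 + parameter_vector[3]*x3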
def lowerCamelCase__ ( _UpperCAmelCase , _UpperCAmelCase="train" ) -> int:
return calculate_hypothesis_value(_UpperCAmelCase , _UpperCAmelCase ) - output(
_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ ( _UpperCAmelCase ) -> str:
lowerCamelCase =0
for i in range(len(_UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative(index, end=m) -> float:
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index) -> float:
    cost_derivative_value = summation_of_cost_derivative(index, m ) / m
    return cost_derivative_value
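# For a mean-squared-error cost J, the partial derivative w.r.t. parameter j is
#   dJ/dp_j = (1/m) * sum_i (h(x_i) - y_i) * x_ij
# where index == -1 above selects the bias term (its input is implicitly 1).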
def run_gradient_descent() -> None:
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.0_0_0_0_0_2
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j) )
def test_gradient_descent() -> None:
    for i in range(len(test_data ) ):
        print(("""Actual output value:""", output(i , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(i , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 370 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = (
    subprocess.check_output(F"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode('''utf-8''').split()
)
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 262 | 0 |
import math
def proth(number: int) -> int:
    """Return the nth Proth number: 3, 5, 9, 13, 17, 25, ... (k * 2**m + 1 with odd k < 2**m)."""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        msg = F"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
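# First ten Proth numbers, for reference: 3, 5, 9, 13, 17, 25, 33, 41, 49, 57.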
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 10 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ):
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values( self , image_inputs , batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
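    # e.g. a 640x480 PIL image with size {"shortest_edge": 18, ...} yields
    # expected_height=18, expected_width=24: the short side is scaled to 18
    # and the aspect ratio is preserved.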
@require_torch
@require_vision
class YolosImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """image_mean""" ) )
        self.assertTrue(hasattr(image_processing , """image_std""" ) )
        self.assertTrue(hasattr(image_processing , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_equivalence_padding( self ):
        '''simple docstring'''
        image_processing_1 = self.image_processing_class(**self.image_processor_dict )
        image_processing_2 = self.image_processing_class(do_resize=False , do_normalize=False , do_rescale=False )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs , return_tensors="""pt""" )
        encoded_images = image_processing_2(image_inputs , return_tensors="""pt""" )
        self.assertTrue(
            torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        '''simple docstring'''
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
            target = json.loads(f.read() )
        target = {"""image_id""": 3_97_69, """annotations""": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
        encoding = image_processing(images=image , annotations=target , return_tensors="""pt""" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["""pixel_values"""].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , expected_shape )
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        '''simple docstring'''
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
            target = json.loads(f.read() )
        target = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
        masks_path = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
        # encode them
        image_processing = YolosImageProcessor(format="""coco_panoptic""" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="""pt""" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["""pixel_values"""].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , expected_shape )
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , expected_size ) )
| 252 | 0 |
def kinetic_energy(mass: float , velocity: float ) -> float:
    """Compute the kinetic energy 0.5 * m * v**2 of a body of mass m moving at speed v."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative" )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
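# Example: a 10 kg mass moving at 10 m/s has 0.5 * 10 * 10**2 = 500.0 J of kinetic energy.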
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 198 |
def and_gate(input_1: int , input_2: int ) -> int:
    """Logical AND: returns 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate() -> None:
    """simple docstring"""
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
    test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 198 | 1 |
from jiwer import compute_measures
import datasets
_SCREAMING_SNAKE_CASE = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_SCREAMING_SNAKE_CASE = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_SCREAMING_SNAKE_CASE = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER( datasets.Metric ):
    def _info( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
            ] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        """simple docstring"""
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
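# Worked example from the docstring above: pair 1 contributes 1 substitution over 4
# reference words; pair 2, under one plausible alignment, 2 substitutions + 1 insertion
# over 4 reference words, so WER = (1 + 3) / (4 + 4) = 0.5.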
| 343 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
    def test_config( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
    @unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["""basic""", """bottleneck"""]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["""output_hidden_states"""] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1E-4 ) )
| 343 | 1 |
'''simple docstring'''
def longest_common_substring(text1: str , text2: str ) -> str:
    if not (isinstance(text1 , str ) and isinstance(text2 , str )):
        raise ValueError('longest_common_substring() takes two strings for inputs' )
    text1_length = len(text1 )
    text2_length = len(text2 )
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1 )]
    ans_index = 0
    ans_length = 0
    for i in range(1 , text1_length + 1 ):
        for j in range(1 , text2_length + 1 ):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
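# Example: longest_common_substring("abcdxyz", "xyzabcd") returns "abcd".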
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        '''simple docstring'''
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'do_flip_channel_order' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 236 | 0 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def custom_name_func( func , param_num , param ):
    '''simple docstring'''
    param_based_name = parameterized.to_safe_name("""_""".join(str(x ) for x in param.args ) )
    return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2( TestCasePlus ):
    '''simple docstring'''
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed( self , stage : str , model : str ):
        """simple docstring"""
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed( self , stage : str , model : str ):
        """simple docstring"""
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed( self , stage : str , model : str ):
        """simple docstring"""
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed( self , stage : str , model : str ):
        """simple docstring"""
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )
    def do_checks( self , output_dir ):
        """simple docstring"""
        # XXX: run_asr is premature and doesn't save any results, so all we
        # check for now is that the process didn't fail
        pass
    def run_and_check( self , stage: str , model: str , eval_steps: int = 10 , distributed: bool = True , fp16: bool = True , quality_checks: bool = True , ):
        """simple docstring"""
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir )
        return output_dir
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ):
"""simple docstring"""
UpperCAmelCase__ = self.get_auto_remove_tmp_dir("""./xxx""" , after=_UpperCAmelCase )
UpperCAmelCase__ = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(_UpperCAmelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
UpperCAmelCase__ = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
UpperCAmelCase__ = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
UpperCAmelCase__ = self.get_launcher(_UpperCAmelCase )
UpperCAmelCase__ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
return output_dir
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : List[str]=False ):
"""simple docstring"""
UpperCAmelCase__ = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
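# Illustrative (not from the original file): on a box with >= 2 GPUs,
# get_launcher(distributed=True)  -> ['deepspeed', '--num_nodes', '1', '--num_gpus', '2']
# get_launcher(distributed=False) -> ['deepspeed', '--num_nodes', '1', '--num_gpus', '1']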
| 346 |
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    # base case must be length <= 1: with <= 2 a two-character string like "ab"
    # would wrongly be reported as a palindrome
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds')
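# Rough intuition for the timings below (not from the original file): the slice version runs the
# whole comparison in C, the loop/all() versions pay per-character Python overhead, and the
# recursive version additionally pays a function call and a string copy per stripped pair.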
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 346 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.12'  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values within the vocab size."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(input_shape, rng=None):
    attn_mask = ids_tensor(input_shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
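# Illustrative example (not in the original file): ids_tensor((2, 4), vocab_size=10) returns an
# int32 array of shape (2, 4) with entries in [0, 9]; random_attention_mask((2, 4)) returns a
# 0/1 mask of the same shape whose last column is forced to 1, so every row attends to something.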
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['input_ids'].shape[-1] // 2
        input_ids = inputs['input_ids'][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
        model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
        encoder_input_str = 'Hello world'
        input_ids = tokenizer(encoder_input_str, return_tensors='np').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, 'do_samples'):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, 'foo'):
            fake_model_kwargs = {'foo': 'bar'}
            model.generate(input_ids, **fake_model_kwargs)
| 351 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'nat'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
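# Minimal usage sketch (illustrative, not part of the original file):
# config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
# config.hidden_size == 64 * 2 ** 3 == 512  # channel dim after the last stage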
| 179 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    # computes u * (u - 1) * ... * (u - p + 1), the coefficient factor
    # of the p-th term in Newton's forward-difference formula
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
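# Quick check of `ucal` (illustrative values, verified by hand):
#   ucal(1, 2)   -> 1 * (1 - 1) = 0
#   ucal(2, 3)   -> 2 * (2 - 1) * (2 - 2) = 0
#   ucal(2.5, 2) -> 2.5 * (2.5 - 1) = 3.75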
def main() -> None:
    n = int(input('enter the numbers of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"""the value at {value} is {summ}""")
if __name__ == "__main__":
main()
| 16 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
        encoded = tokenizer('e è é ê ë')
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )
    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 262 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints where one branch is a complete subset of another: completing
        # [1, 2] would be ambiguous between fulfilling the constraint and being midway through
        # [1, 2, 3, 4], so such inputs are rejected at construction time.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
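# Reference note (editorial assumption, not from the original file): DisjunctiveConstraint treats
# the branch set as a trie, so after [1, 2] either 3 (finishing [1, 2, 3]) or 4 (advancing
# [1, 2, 4, 5]) keeps the constraint alive; update() reports (stepped, completed, reset).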
| 369 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"""&page={i + 2}""", headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        return job_links
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")
    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"""&page={i + 2}""", headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        return artifacts
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")
    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact (the download URL redirects to the actual storage location)"""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': ') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                test = line[len('FAILED ') :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` """
            f"""and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            ' problem.')
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path"""
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        model = test.split('/')[2]
    else:
        model = None
    return model


def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model"""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = f"""| {count} | {error[:100]} | |"""
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = f"""| {model} | {count} | {error[:60]} | {_count} |"""
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(""" / """)
_SCREAMING_SNAKE_CASE = k[index + len(""" / """) :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
fp.write(sa)
| 88 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: Optional[ImageInput] = None, text=None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
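# Minimal usage sketch (illustrative; the checkpoint name is an assumption):
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(images=pil_image, text=["a photo of a cat"], return_tensors="pt")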
| 198 |
'''simple docstring'''
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
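# Worked example (not part of the original file): factorial(10) == 3628800 and
# split_and_add(3628800) == 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27.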
def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 198 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'test'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = 'test'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'labels'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        pred_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decode_strs = processor.char_decode(pred_ids)
        decoded_tok = tokenizer.batch_decode(pred_ids)
        decode_strs_tok = [seq.replace(' ', '') for seq in decoded_tok]
        self.assertListEqual(decode_strs, decode_strs_tok)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(
            list(results.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 356 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 275 | 0 |
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name='BB84')
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend('aer_simulator')
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, '0')
    return key
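# Note (derived from the code above, not in the original file): Alice's and Bob's bases match
# with probability 1/2 per qubit, so about 3 * key_len of the 6 * key_len measured bits survive
# sifting on average -- comfortably more than the key_len bits that are finally kept.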
if __name__ == "__main__":
    print(f'''The generated key is : {bb84(8, seed=0)}''')
from doctest import testmod
testmod()
| 27 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self: Dict , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any]="<s>" , _lowerCAmelCase: List[str]="</s>" , _lowerCAmelCase: Optional[int]="<pad>" , _lowerCAmelCase: List[str]="<unk>" , _lowerCAmelCase: Dict=False , _lowerCAmelCase: Optional[Any]=False , _lowerCAmelCase: Dict=None , _lowerCAmelCase: Dict=None , _lowerCAmelCase: Optional[Dict[str, Any]] = None , **_lowerCAmelCase: Tuple , ):
lowercase :str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , do_upper_case=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , tgt_lang=_lowerCAmelCase , lang_codes=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase :List[str] = do_upper_case
lowercase :List[str] = do_lower_case
lowercase :str = load_json(_lowerCAmelCase )
lowercase :List[Any] = {v: k for k, v in self.encoder.items()}
lowercase :str = spm_file
lowercase :Any = load_spm(_lowerCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
lowercase :List[str] = lang_codes
lowercase :Tuple = LANGUAGES[lang_codes]
lowercase :Any = [F"<lang:{lang}>" for lang in self.langs]
lowercase :Any = {lang: self.sp_model.PieceToId(F"<lang:{lang}>" ) for lang in self.langs}
lowercase :Optional[Any] = self.lang_tokens
lowercase :List[str] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
lowercase :Tuple = {}
@property
def SCREAMING_SNAKE_CASE ( self: Tuple ):
return len(self.encoder )
@property
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
return self._tgt_lang
@tgt_lang.setter
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Optional[Any] ):
lowercase :Any = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: str ):
lowercase :Any = self.lang_code_to_id[tgt_lang]
lowercase :List[Any] = [lang_code_id]
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: str ):
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: Tuple ):
return self.encoder.get(_lowerCAmelCase , self.encoder[self.unk_token] )
def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: int ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: List[str] ):
lowercase :int = []
lowercase :Any = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
lowercase :Dict = self.sp_model.decode(_lowerCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
lowercase :Any = []
else:
current_sub_tokens.append(_lowerCAmelCase )
lowercase :Dict = self.sp_model.decode(_lowerCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: int , _lowerCAmelCase: str=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: List[int] , _lowerCAmelCase: Optional[List[int]] = None , _lowerCAmelCase: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase :int = [1] * len(self.prefix_tokens )
lowercase :List[str] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def SCREAMING_SNAKE_CASE ( self: Dict ):
lowercase :int = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[Any] ):
lowercase :Union[str, Any] = self.__dict__.copy()
lowercase :Dict = None
return state
def __setstate__( self: Union[str, Any] , _lowerCAmelCase: Dict ):
lowercase :List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase :List[Any] = {}
lowercase :Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: str , _lowerCAmelCase: Optional[str] = None ):
lowercase :List[str] = Path(_lowerCAmelCase )
assert save_dir.is_dir(), F"{save_directory} should be a directory"
lowercase :List[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
lowercase :Any = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , _lowerCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowerCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_lowerCAmelCase , "wb" ) as fi:
lowercase :int = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (str(_lowerCAmelCase ), str(_lowerCAmelCase ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
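

# --- Illustrative sketch (editor's addition) ---
# Round-trip behaviour of the save_json/load_json helpers above; all names
# below are local to this demo.
def _json_roundtrip_demo() -> None:
    import tempfile

    vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    path = os.path.join(tempfile.mkdtemp(), "vocab.json")
    save_json(vocab, path)
    assert load_json(path) == vocab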
| 236 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def resolver(self):
        """simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        """simple docstring"""
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        """simple docstring"""
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 364 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
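
# --- Illustrative note (editor's addition); names below are hypothetical ---
# The try/except blocks above implement the "soft dependency" pattern: probe
# each optional backend, and on failure import dummy placeholders that raise
# a helpful error only when used. A minimal standalone version of the idea:
#
#     try:
#         import torchsde  # noqa: F401
#         _torchsde_available = True
#     except ImportError:
#         _torchsde_available = False
#
#     if not _torchsde_available:
#         class DPMSolverSDEScheduler:  # placeholder with the same name
#             def __init__(self, *args, **kwargs):
#                 raise ImportError("DPMSolverSDEScheduler requires `torchsde`.")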
| 306 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
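

# --- Illustrative note (editor's addition); OldName/NewName are hypothetical ---
# The general shape of a deprecation shim like the one above: subclass the
# replacement, warn once on construction, and delegate everything else.
#
#     class OldName(NewName):
#         def __init__(self, *args, **kwargs):
#             warnings.warn("OldName is deprecated; use NewName.", FutureWarning)
#             super().__init__(*args, **kwargs)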
| 107 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    """simple docstring"""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """simple docstring"""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """simple docstring"""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """simple docstring"""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """simple docstring"""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
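

# --- Illustrative sketch (editor's addition) ---
# The functions above return only the maximum revenue. A small extension of
# the bottom-up recurrence also records, for each length i, the first-piece
# size that achieved the optimum, so the actual cut list can be reconstructed.
def bottom_up_cut_rod_with_cuts(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf")] * (n + 1)
    max_rev[0] = 0
    first_cut = [0] * (n + 1)
    for i in range(1, n + 1):
        for j in range(1, i + 1):
            candidate = prices[j - 1] + max_rev[i - j]
            if candidate > max_rev[i]:
                max_rev[i] = candidate
                first_cut[i] = j
    # Walk back through the recorded first cuts to list the piece lengths.
    cuts, remaining = [], n
    while remaining > 0:
        cuts.append(first_cut[remaining])
        remaining -= first_cut[remaining]
    return max_rev[n], cuts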
| 179 | 0 |
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
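

# --- Illustrative note (editor's addition) ---
# `DummyObject` + `requires_backends` defer the failure from import time to
# use time: importing the package succeeds without torch/scipy, and only
# instantiating (or calling from_config/from_pretrained on) the placeholder
# raises an ImportError telling the user which backends to install.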
| 65 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case = StableDiffusionLDMaDPipeline
snake_case = TEXT_TO_IMAGE_PARAMS
snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self )->str:
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
A_ : str = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
A_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : str = CLIPTextModel(_SCREAMING_SNAKE_CASE )
A_ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 )->Dict:
'''simple docstring'''
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A_ : Tuple = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
A_ : List[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self )->int:
'''simple docstring'''
A_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : List[Any] = self.get_dummy_components()
A_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
A_ : Dict = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : Tuple = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Optional[Any] = output.rgb, output.depth
A_ : List[Any] = rgb[0, -3:, -3:, -1]
A_ : Union[str, Any] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
A_ : Tuple = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
A_ : Union[str, Any] = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
A_ : List[str] = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : str = 3 * [inputs['''prompt''']]
# forward
A_ : Optional[Any] = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Optional[int] = output.rgb, output.depth
A_ : Tuple = rgb_slice_a[0, -3:, -3:, -1]
A_ : Optional[Any] = depth_slice_a[0, -3:, -1]
A_ : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : Dict = 3 * [inputs.pop('''prompt''' )]
A_ : Tuple = ldmad_pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
A_ : Dict = text_inputs['''input_ids'''].to(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = ldmad_pipe.text_encoder(_SCREAMING_SNAKE_CASE )[0]
A_ : Optional[int] = prompt_embeds
# forward
A_ : Optional[int] = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Any = output.rgb, output.depth
A_ : Any = rgb_slice_a[0, -3:, -3:, -1]
A_ : str = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A_ : List[str] = self.get_dummy_components()
A_ : int = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
A_ : int = StableDiffusionLDMaDPipeline(**_SCREAMING_SNAKE_CASE )
A_ : str = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = '''french fries'''
A_ : Optional[Any] = ldmad_pipe(**_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE )
A_ , A_ : Optional[int] = output.rgb, output.depth
A_ : Optional[Any] = rgb[0, -3:, -3:, -1]
A_ : int = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
A_ : int = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
A_ : Any = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 )->Optional[int]:
'''simple docstring'''
A_ : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ : str = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
A_ : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
A_ : int = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self )->str:
'''simple docstring'''
A_ : List[str] = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
A_ : Optional[Any] = ldmad_pipe.to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : int = self.get_inputs(_SCREAMING_SNAKE_CASE )
A_ : Dict = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Union[str, Any] = output.rgb, output.depth
A_ : int = rgb[0, -3:, -3:, -1].flatten()
        A_ : Union[str, Any] = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
A_ : Tuple = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
A_ : Tuple = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 )->int:
'''simple docstring'''
A_ : Optional[Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
A_ : Any = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 64, 64) )
A_ : str = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 50,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self )->int:
'''simple docstring'''
A_ : Tuple = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : List[Any] = self.get_inputs(_SCREAMING_SNAKE_CASE )
A_ : str = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : List[Any] = output.rgb, output.depth
A_ : int = 0.4_9_5_5_8_6
A_ : Union[str, Any] = 0.3_3_7_9_5_5_1_5
A_ : Optional[int] = 1_1_2.4_8_5_1_8
A_ : Optional[Any] = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Any = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(_SCREAMING_SNAKE_CASE )
ldmad_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = self.get_inputs(_SCREAMING_SNAKE_CASE )
A_ : Tuple = ldmad_pipe(**_SCREAMING_SNAKE_CASE )
A_ , A_ : Optional[Any] = output.rgb, output.depth
A_ : Tuple = 0.4_1_9_4_1_2_7
A_ : Union[str, Any] = 0.3_5_3_7_5_5_8_6
A_ : Union[str, Any] = 0.5_6_3_8_5_0_2
A_ : List[Any] = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 65 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
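
# --- Illustrative note (editor's addition) ---
# With the _LazyModule registration above, submodules are imported only on
# first attribute access, so importing the package stays cheap even though
# both PyTorch- and TensorFlow-backed classes are declared here. Conceptually:
#
#     import transformers.models.xlm as xlm  # fast: no heavy framework import yet
#     xlm.XLMModel  # first access triggers `from .modeling_xlm import XLMModel`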
| 41 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
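
# --- Usage sketch (editor's addition); the paths shown are hypothetical ---
#
#     $ python ./utils/get_modified_files.py utils src tests examples
#     src/transformers/models/bert/modeling_bert.py tests/test_tokenization_bert.py
#
# `end=""` matters: Makefile recipes splice this output into a command line,
# so the script must not emit a trailing newline.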
| 88 | 0 |
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_A = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
_A = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
_A = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 166 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """simple docstring"""

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        """simple docstring"""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        """simple docstring"""
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """simple docstring"""

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        """simple docstring"""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        """simple docstring"""
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        """simple docstring"""
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        """simple docstring"""
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        """simple docstring"""
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 166 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
if equal_length:
lowerCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def _snake_case ( self ) -> int:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
lowerCAmelCase = self.feature_extraction_class.from_pretrained(A_ )
lowerCAmelCase = feat_extract_first.to_dict()
lowerCAmelCase = feat_extract_second.to_dict()
lowerCAmelCase = feat_extract_first.mel_filters
lowerCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _snake_case ( self ) -> str:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase = os.path.join(A_ , """feat_extract.json""" )
feat_extract_first.to_json_file(A_ )
lowerCAmelCase = self.feature_extraction_class.from_json_file(A_ )
lowerCAmelCase = feat_extract_first.to_dict()
lowerCAmelCase = feat_extract_second.to_dict()
lowerCAmelCase = feat_extract_first.mel_filters
lowerCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
lowerCAmelCase = feature_extractor(A_ , padding="""max_length""" , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
lowerCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
lowerCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
lowerCAmelCase = feature_extractor(A_ , return_tensors="""np""" ).input_features
lowerCAmelCase = feature_extractor(A_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase = np.asarray(A_ )
lowerCAmelCase = feature_extractor(A_ , return_tensors="""np""" ).input_features
lowerCAmelCase = feature_extractor(A_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test truncation required
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
lowerCAmelCase = [np.asarray(A_ ) for speech_input in speech_inputs]
lowerCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
lowerCAmelCase = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
lowerCAmelCase = feature_extractor(A_ , return_tensors="""np""" ).input_features
lowerCAmelCase = feature_extractor(A_ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def _snake_case ( self ) -> Dict:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def _snake_case ( self ) -> int:
lowerCAmelCase = torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
lowerCAmelCase = self._load_datasamples(1 )
lowerCAmelCase = WhisperFeatureExtractor()
lowerCAmelCase = feature_extractor(A_ , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = self._load_datasamples(1 )[0]
lowerCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
lowerCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
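

# --- Illustrative sketch (editor's addition) ---
# Minimal use of the feature extractor outside the test harness: one second of
# silence at 16 kHz is padded to 30 s internally and mapped to a log-mel
# spectrogram of shape (1, 80, 3000), matching the assertion in the tests above.
def _whisper_feature_demo():
    feature_extractor = WhisperFeatureExtractor()
    audio = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at 16 kHz
    features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
    assert features.shape == (1, 80, 3_000)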
| 46 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
_UpperCamelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : Tuple = (3, 32, 128)
__lowerCAmelCase : List[str] = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase : List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__lowerCAmelCase : Optional[int] = dict(zip(A_ , range(len(A_ ) ) ) )
__lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
__lowerCAmelCase : Union[str, Any] = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
__lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A_ , A_ )
def UpperCamelCase__ ( self , **A_ ) ->Tuple:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase__ ( self , **A_ ) ->Tuple:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__lowerCAmelCase : str = Image.fromarray(np.moveaxis(A_ , 0 , -1 ) )
return image_input
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Dict = self.get_tokenizer()
__lowerCAmelCase : List[Any] = self.get_image_processor()
__lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : List[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCAmelCase : int = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
__lowerCAmelCase : int = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : Any = self.get_image_processor()
__lowerCAmelCase : Optional[Any] = self.get_tokenizer()
__lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
__lowerCAmelCase : Optional[Any] = image_processor(A_ , return_tensors='''np''' )
__lowerCAmelCase : Tuple = processor(images=A_ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase__ ( self ) ->str:
'''simple docstring'''
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
__lowerCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Any = '''test'''
__lowerCAmelCase : Dict = processor(text=A_ )
__lowerCAmelCase : str = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.get_image_processor()
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : str = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : List[Any] = '''test'''
__lowerCAmelCase : int = self.prepare_image_inputs()
__lowerCAmelCase : int = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def UpperCamelCase__ ( self ) ->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.get_image_processor()
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase : Optional[int] = processor.char_decode(A_ )
__lowerCAmelCase : Tuple = tokenizer.batch_decode(A_ )
__lowerCAmelCase : Any = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(A_ , A_ )
def UpperCamelCase__ ( self ) ->Any:
'''simple docstring'''
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : int = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
__lowerCAmelCase : List[Any] = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase__ ( self ) ->List[str]:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.get_image_processor()
__lowerCAmelCase : List[str] = self.get_tokenizer()
__lowerCAmelCase : Any = MgpstrProcessor(tokenizer=A_ , image_processor=A_ )
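        # MGP-STR decodes with three parallel heads; the random logits below use the
        # character (38), GPT-2 BPE (50257) and BERT WordPiece (30522) vocab sizes.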
__lowerCAmelCase : List[Any] = torch.randn(1 , 27 , 38 )
__lowerCAmelCase : Optional[int] = torch.randn(1 , 27 , 5_0257 )
__lowerCAmelCase : Optional[Any] = torch.randn(1 , 27 , 3_0522 )
__lowerCAmelCase : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 275 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
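# Standard lazy-import layout: the dict below maps submodule names to exported
# symbols; submodules are only imported on first attribute access, and optional
# backends (vision, torch) are skipped gracefully when unavailable.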
__UpperCAmelCase = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __UpperCAmelCase['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __UpperCAmelCase['''modeling_bridgetower'''] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], __UpperCAmelCase)
| 228 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__UpperCAmelCase = {
'''junnyu/roformer_chinese_small''': 1_536,
'''junnyu/roformer_chinese_base''': 1_536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
__UpperCAmelCase = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowercase__ : int = VOCAB_FILES_NAMES
lowercase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowercase__ : Tuple = RoFormerTokenizer
def __init__( self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True , lowerCamelCase_="[UNK]" , lowerCamelCase_="[SEP]" , lowerCamelCase_="[PAD]" , lowerCamelCase_="[CLS]" , lowerCamelCase_="[MASK]" , lowerCamelCase_=True , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Tuple:
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('''lowercase''' , lowerCamelCase_ ) != do_lower_case
or pre_tok_state.get('''strip_accents''' , lowerCamelCase_ ) != strip_accents
):
lowerCAmelCase__ = getattr(lowerCamelCase_ , pre_tok_state.pop('''type''' ) )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = strip_accents
lowerCAmelCase__ = pre_tok_class(**lowerCamelCase_ )
lowerCAmelCase__ = do_lower_case
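    # PreTokenizer.custom(...) wraps a Python object that cannot be pickled, so
    # __getstate__ temporarily swaps in a plain BertPreTokenizer and __setstate__
    # restores the custom Jieba pre-tokenizer from the reloaded vocab.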
def __getstate__( self ) -> Any:
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = BertPreTokenizer()
return state
def __setstate__( self , lowerCamelCase_ ) -> List[Any]:
lowerCAmelCase__ = d
lowerCAmelCase__ = self.__dict__['''_tokenizer'''].get_vocab()
lowerCAmelCase__ = PreTokenizer.custom(JiebaPreTokenizer(lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None ) -> Union[str, Any]:
lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> Tuple[str]:
lowerCAmelCase__ = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=False , **lowerCamelCase_ , ) -> Union[str, Any]:
lowerCAmelCase__ = BertPreTokenizer()
return super().save_pretrained(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
| 228 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( TokenizerTesterMixin , unittest.TestCase ):
__magic_name__: Optional[int] = BlenderbotSmallTokenizer
__magic_name__: List[Any] = False
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
super().setUp()
snake_case_ : str = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
snake_case_ : Any = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
snake_case_ : str = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
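        # Toy BPE merge table: e.g. 'apte' segments into 'ap@@' + 'te' via the 'a p' and 't e</w>' merges.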
snake_case_ : List[str] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
snake_case_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCAmelCase_ ) )
def UpperCAmelCase_ ( self : str , **_A : Any ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCAmelCase_ ( self : List[Any] , _A : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case_ : Any = 'adapt act apte'
snake_case_ : Any = 'adapt act apte'
return input_text, output_text
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
snake_case_ : Dict = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Dict = 'adapt act apte'
snake_case_ : List[Any] = ['adapt', 'act', 'ap@@', 'te']
snake_case_ : List[str] = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ : Optional[Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
snake_case_ : Tuple = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case_ : Optional[int] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [1384]
snake_case_ : Tuple = 'I am a small frog.'
snake_case_ : List[Any] = tok([src_text] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )['input_ids']
snake_case_ : Optional[int] = tok.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase_ ( self : List[Any] ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
snake_case_ : List[str] = 'I am a small frog .'
snake_case_ : Dict = '.'
snake_case_ : Tuple = tok(UpperCAmelCase_ )['input_ids']
snake_case_ : List[Any] = tok(UpperCAmelCase_ )['input_ids']
assert encoded[-1] == encoded_dot[0]
| 327 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __UpperCAmelCase (ChunkPipeline ):
def __init__( self: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
super().__init__(**UpperCAmelCase_ )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
self.check_model_type(UpperCAmelCase_ )
def UpperCamelCase ( self: str , **UpperCAmelCase_: Dict ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = {}
# preprocess args
if "points_per_batch" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
_SCREAMING_SNAKE_CASE = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self: Optional[Any] , UpperCAmelCase_: Tuple , *UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=None , UpperCAmelCase_: Tuple=None , **UpperCAmelCase_: Any ):
'''simple docstring'''
return super().__call__(UpperCAmelCase_ , *UpperCAmelCase_ , num_workers=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , **UpperCAmelCase_ )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: List[str] , UpperCAmelCase_: Dict=64 , UpperCAmelCase_: int = 0 , UpperCAmelCase_: float = 512 / 1_500 , UpperCAmelCase_: Optional[int] = 32 , UpperCAmelCase_: Optional[int] = 1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = load_image(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor.size["""longest_edge"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
_SCREAMING_SNAKE_CASE = self.get_inference_context()
with inference_context():
_SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(UpperCAmelCase_ , device=self.device )
_SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
_SCREAMING_SNAKE_CASE = image_embeddings
_SCREAMING_SNAKE_CASE = grid_points.shape[1]
_SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :]
_SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch]
_SCREAMING_SNAKE_CASE = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
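    # preprocess() yields one chunk per batch of points; ChunkPipeline runs
    # _forward once per yielded chunk and postprocess() merges masks across chunks.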
def UpperCamelCase ( self: Any , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Optional[Any]=0.88 , UpperCAmelCase_: Dict=0.95 , UpperCAmelCase_: Tuple=0 , UpperCAmelCase_: str=1 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = model_inputs.pop("""input_boxes""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" )
_SCREAMING_SNAKE_CASE = model_inputs.pop("""original_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = model_inputs.pop("""reshaped_input_sizes""" ).tolist()
_SCREAMING_SNAKE_CASE = self.model(**UpperCAmelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
_SCREAMING_SNAKE_CASE = model_outputs["""pred_masks"""]
_SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , binarize=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model_outputs["""iou_scores"""]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str]=False , UpperCAmelCase_: str=False , UpperCAmelCase_: Any=0.7 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.cat(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = defaultdict(UpperCAmelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = {}
if output_rle_mask:
_SCREAMING_SNAKE_CASE = rle_mask
if output_bboxes_mask:
_SCREAMING_SNAKE_CASE = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 306 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
UpperCamelCase = logging.getLogger(__name__)
@dataclass
class __UpperCAmelCase :
__snake_case : Optional[str] = field(
default="tab_fact" ,metadata={"help": "The name of the dataset to use (via the datasets library)."} )
__snake_case : Optional[str] = field(
default="tab_fact" ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ,)
__snake_case : int = field(
default=1024 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__snake_case : bool = field(
        default=False ,metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} ,)
__snake_case : Optional[int] = field(
        default=None ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
__snake_case : Optional[int] = field(
        default=None ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
__snake_case : Optional[int] = field(
        default=None ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} ,)
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "A csv or a json file containing the training data."} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "A csv or a json file containing the validation data."} )
__snake_case : Optional[str] = field(default=_UpperCAmelCase ,metadata={"help": "A csv or a json file containing the test data."} )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
_SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __UpperCAmelCase :
__snake_case : str = field(
default=_UpperCAmelCase ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__snake_case : Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
__snake_case : bool = field(
default=_UpperCAmelCase ,metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} ,)
__snake_case : str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
__snake_case : bool = field(
        default=False ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
_SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
datasets.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
_SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_SCREAMING_SNAKE_CASE = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
_SCREAMING_SNAKE_CASE = load_dataset("""csv""" ,data_files=snake_case__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_SCREAMING_SNAKE_CASE = load_dataset("""json""" ,data_files=snake_case__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
_SCREAMING_SNAKE_CASE = len(snake_case__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=snake_case__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
_SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=snake_case__ ,)
_SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=snake_case__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
_SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
_SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
_SCREAMING_SNAKE_CASE = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(snake_case__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(snake_case__ ):
_SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
_SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
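        # TabFact serializes tables with '#'-separated cells, one row per line,
        # e.g. "col_a#col_b\n1#2" becomes a two-column pandas DataFrame.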
_SCREAMING_SNAKE_CASE = examples["""statement"""]
_SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas ,examples["""table_text"""] ) )
_SCREAMING_SNAKE_CASE = tokenizer(snake_case__ ,snake_case__ ,padding=snake_case__ ,max_length=snake_case__ ,truncation=snake_case__ )
_SCREAMING_SNAKE_CASE = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
_SCREAMING_SNAKE_CASE = raw_datasets.map(
snake_case__ ,batched=snake_case__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="""Running tokenizer on dataset""" ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
_SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
_SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
_SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
_SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(snake_case__ ) ) ,3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(snake_case__ ):
_SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions ,snake_case__ ) else p.predictions
_SCREAMING_SNAKE_CASE = np.argmax(snake_case__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
_SCREAMING_SNAKE_CASE = DataCollatorWithPadding(snake_case__ ,pad_to_multiple_of=8 )
else:
_SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE = Trainer(
model=snake_case__ ,args=snake_case__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=snake_case__ ,tokenizer=snake_case__ ,data_collator=snake_case__ ,)
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE = last_checkpoint
_SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=snake_case__ )
_SCREAMING_SNAKE_CASE = train_result.metrics
_SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case__ )
)
_SCREAMING_SNAKE_CASE = min(snake_case__ ,len(snake_case__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" ,snake_case__ )
trainer.save_metrics("""train""" ,snake_case__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=snake_case__ )
_SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(snake_case__ )
_SCREAMING_SNAKE_CASE = min(snake_case__ ,len(snake_case__ ) )
trainer.log_metrics("""eval""" ,snake_case__ )
trainer.save_metrics("""eval""" ,snake_case__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
_SCREAMING_SNAKE_CASE = trainer.predict(snake_case__ ,metric_key_prefix="""predict""" ).predictions
_SCREAMING_SNAKE_CASE = np.argmax(snake_case__ ,axis=1 )
_SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir ,"""predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(snake_case__ ,"""w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(snake_case__ ):
_SCREAMING_SNAKE_CASE = label_list[item]
writer.write(F'{index}\t{item}\n' )
_SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
def __lowerCamelCase ( snake_case__ ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 365 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Dict:
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_SCREAMING_SNAKE_CASE = state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight' )
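        # DeiT stores Q, K and V as one fused (3 * hidden, hidden) matrix; the
        # three slices below feed HF's separate query/key/value projections.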
_SCREAMING_SNAKE_CASE = in_proj_weight[
: encoder_config.hidden_size, :
]
_SCREAMING_SNAKE_CASE = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_SCREAMING_SNAKE_CASE = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = dct.pop(snake_case__ )
_SCREAMING_SNAKE_CASE = val
def __lowerCamelCase ( snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if "handwritten" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_SCREAMING_SNAKE_CASE = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
_SCREAMING_SNAKE_CASE = Image.open(requests.get(snake_case__ ,stream=snake_case__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def __lowerCamelCase ( snake_case__ ,snake_case__ ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ViTConfig(image_size=3_84 ,qkv_bias=snake_case__ )
_SCREAMING_SNAKE_CASE = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_SCREAMING_SNAKE_CASE = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = 40_96
_SCREAMING_SNAKE_CASE = 24
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 10_24
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = """relu"""
_SCREAMING_SNAKE_CASE = 10_24
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
# load HuggingFace model
_SCREAMING_SNAKE_CASE = ViTModel(snake_case__ ,add_pooling_layer=snake_case__ )
_SCREAMING_SNAKE_CASE = TrOCRForCausalLM(snake_case__ )
_SCREAMING_SNAKE_CASE = VisionEncoderDecoderModel(encoder=snake_case__ ,decoder=snake_case__ )
model.eval()
# load state_dict of original model, rename some keys
_SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(snake_case__ ,map_location="""cpu""" ,check_hash=snake_case__ )["""model"""]
_SCREAMING_SNAKE_CASE = create_rename_keys(snake_case__ ,snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ ,snake_case__ ,snake_case__ )
read_in_q_k_v(snake_case__ ,snake_case__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_SCREAMING_SNAKE_CASE = state_dict.pop(snake_case__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
_SCREAMING_SNAKE_CASE = val
else:
_SCREAMING_SNAKE_CASE = val
# load state dict
model.load_state_dict(snake_case__ )
# Check outputs on an image
_SCREAMING_SNAKE_CASE = ViTImageProcessor(size=encoder_config.image_size )
_SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained("""roberta-large""" )
_SCREAMING_SNAKE_CASE = TrOCRProcessor(snake_case__ ,snake_case__ )
_SCREAMING_SNAKE_CASE = processor(images=prepare_img(snake_case__ ) ,return_tensors="""pt""" ).pixel_values
# verify logits
_SCREAMING_SNAKE_CASE = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_SCREAMING_SNAKE_CASE = model(pixel_values=snake_case__ ,decoder_input_ids=snake_case__ )
_SCREAMING_SNAKE_CASE = outputs.logits
_SCREAMING_SNAKE_CASE = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
_SCREAMING_SNAKE_CASE = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] ,snake_case__ ,atol=1e-3 ), "First elements of logits not as expected"
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 125 | 0 |
import logging
import os
from .state import PartialState
class A ( logging.LoggerAdapter ):
@staticmethod
def lowercase_ (__UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : str , **__UpperCAmelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
UpperCAmelCase__ = kwargs.pop("main_process_only" , __UpperCAmelCase )
UpperCAmelCase__ = kwargs.pop("in_order" , __UpperCAmelCase )
if self.isEnabledFor(__UpperCAmelCase ):
if self._should_log(__UpperCAmelCase ):
UpperCAmelCase__ , UpperCAmelCase__ = self.process(__UpperCAmelCase , __UpperCAmelCase )
self.logger.log(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
elif in_order:
UpperCAmelCase__ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
UpperCAmelCase__ , UpperCAmelCase__ = self.process(__UpperCAmelCase , __UpperCAmelCase )
self.logger.log(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
state.wait_for_everyone()
def lowerCAmelCase_ ( __A, __A = None ) -> Dict:
'''simple docstring'''
if log_level is None:
UpperCAmelCase__ = os.environ.get("ACCELERATE_LOG_LEVEL", __A )
UpperCAmelCase__ = logging.getLogger(__A )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__A, {} )
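# Minimal usage sketch (hedged: assumes the original `accelerate` names, i.e. the
# helper above is `get_logger` and an Accelerator/PartialState is already set up):
# logger = get_logger(__name__, log_level="INFO")
# logger.info("logged once", main_process_only=True)
# logger.info("logged by every rank, in rank order", main_process_only=False, in_order=True)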
| 65 |
from __future__ import annotations
from collections import deque
class A :
def __init__(self : Dict , __UpperCAmelCase : list[str] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(__UpperCAmelCase )
self.set_fail_transitions()
def lowercase_ (self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowercase_ (self : Dict , __UpperCAmelCase : str ) -> None:
"""simple docstring"""
UpperCAmelCase__ = 0
for character in keyword:
UpperCAmelCase__ = self.find_next_state(__UpperCAmelCase , __UpperCAmelCase )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCAmelCase__ = len(self.adlist ) - 1
else:
UpperCAmelCase__ = next_state
self.adlist[current_state]["output"].append(__UpperCAmelCase )
def lowercase_ (self : Optional[int] ) -> None:
"""simple docstring"""
UpperCAmelCase__ = deque()
for node in self.adlist[0]["next_states"]:
q.append(__UpperCAmelCase )
UpperCAmelCase__ = 0
while q:
UpperCAmelCase__ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(__UpperCAmelCase )
UpperCAmelCase__ = self.adlist[r]["fail_state"]
while (
self.find_next_state(__UpperCAmelCase , self.adlist[child]["value"] ) is None
and state != 0
):
UpperCAmelCase__ = self.adlist[state]["fail_state"]
UpperCAmelCase__ = self.find_next_state(
__UpperCAmelCase , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
UpperCAmelCase__ = 0
UpperCAmelCase__ = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str ) -> dict[str, list[int]]:
"""simple docstring"""
UpperCAmelCase__ = {} # returns a dict with keywords and list of its occurrences
UpperCAmelCase__ = 0
for i in range(len(__UpperCAmelCase ) ):
while (
self.find_next_state(__UpperCAmelCase , string[i] ) is None
and current_state != 0
):
UpperCAmelCase__ = self.adlist[current_state]["fail_state"]
UpperCAmelCase__ = self.find_next_state(__UpperCAmelCase , string[i] )
if next_state is None:
UpperCAmelCase__ = 0
else:
UpperCAmelCase__ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCAmelCase__ = []
result[key].append(i - len(__UpperCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
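    # Hedged example (the search entry point is `search_in` in the original
    # source; method names are obfuscated to `lowercase_` above):
    # automaton = A(["he", "she", "hers"])
    # automaton.search_in("ushers")  # -> {'she': [1], 'he': [2], 'hers': [2]}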
| 65 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
UpperCamelCase_ = True
except (ImportError, AttributeError):
UpperCamelCase_ = object
def UpperCamelCase ( *UpperCAmelCase , **UpperCAmelCase ) ->Dict:
"""simple docstring"""
pass
UpperCamelCase_ = False
UpperCamelCase_ = logging.get_logger('transformers-cli/serving')
def UpperCamelCase ( UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(UpperCAmelCase , args.host , args.port , args.workers )
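# Degradation path: in the original source the except branch above binds BaseModel
# to `object` and a no-op Body(), so the response classes below still define;
# ServeCommand then raises a RuntimeError at construction if the extras are missing.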
class snake_case ( BaseModel ):
a_ : dict
class snake_case ( BaseModel ):
a_ : List[str]
a_ : Optional[List[int]]
class snake_case ( BaseModel ):
a_ : str
class snake_case ( BaseModel ):
a_ : Any
class snake_case ( BaseTransformersCLICommand ):
@staticmethod
def UpperCAmelCase__ ( __UpperCAmelCase) ->int:
a_ = parser.add_parser(
"serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints.")
serve_parser.add_argument(
"--task" , type=__UpperCAmelCase , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
serve_parser.add_argument("--host" , type=__UpperCAmelCase , default="localhost" , help="Interface the server will listen on.")
serve_parser.add_argument("--port" , type=__UpperCAmelCase , default=88_88 , help="Port the serving will listen to.")
serve_parser.add_argument("--workers" , type=__UpperCAmelCase , default=1 , help="Number of http workers")
serve_parser.add_argument("--model" , type=__UpperCAmelCase , help="Model's name or path to stored model.")
serve_parser.add_argument("--config" , type=__UpperCAmelCase , help="Model's config name or path to stored model.")
serve_parser.add_argument("--tokenizer" , type=__UpperCAmelCase , help="Tokenizer name to use.")
serve_parser.add_argument(
"--device" , type=__UpperCAmelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
serve_parser.set_defaults(func=__UpperCAmelCase)
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->Any:
a_ = pipeline
a_ = host
a_ = port
a_ = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately.")
else:
logger.info(F'''Serving model over {host}:{port}''')
a_ = FastAPI(
routes=[
APIRoute(
"/" , self.model_info , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["GET"] , ),
APIRoute(
"/tokenize" , self.tokenize , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ),
APIRoute(
"/detokenize" , self.detokenize , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ),
APIRoute(
"/forward" , self.forward , response_model=__UpperCAmelCase , response_class=__UpperCAmelCase , methods=["POST"] , ),
] , timeout=6_00 , )
def UpperCAmelCase__ ( self) ->Optional[Any]:
run(self._app , host=self.host , port=self.port , workers=self.workers)
def UpperCAmelCase__ ( self) ->Tuple:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
def UpperCAmelCase__ ( self , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase)) ->str:
try:
a_ = self._pipeline.tokenizer.tokenize(__UpperCAmelCase)
if return_ids:
a_ = self._pipeline.tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
return ServeTokenizeResult(tokens=__UpperCAmelCase , tokens_ids=__UpperCAmelCase)
else:
return ServeTokenizeResult(tokens=__UpperCAmelCase)
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(__UpperCAmelCase)})
def UpperCAmelCase__ ( self , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , __UpperCAmelCase = Body(__UpperCAmelCase , embed=__UpperCAmelCase) , ) ->List[str]:
try:
a_ = self._pipeline.tokenizer.decode(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
return ServeDeTokenizeResult(model="" , text=__UpperCAmelCase)
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(__UpperCAmelCase)})
async def UpperCAmelCase__ ( self , __UpperCAmelCase=Body(__UpperCAmelCase , embed=__UpperCAmelCase)) ->int:
# Check we don't have empty string
if len(__UpperCAmelCase) == 0:
return ServeForwardResult(output=[] , attention=[])
try:
# Forward through the model
a_ = self._pipeline(__UpperCAmelCase)
return ServeForwardResult(output=__UpperCAmelCase)
except Exception as e:
raise HTTPException(5_00 , {"error": str(__UpperCAmelCase)})
| 303 |
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
UpperCamelCase_ = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
UpperCamelCase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
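# The 48-bit payload above is embedded per image with a DWT-DCT watermark; images
# are mapped from [-1, 1] tensors to 0-255 pixel arrays before encoding and back after.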
class snake_case :
def __init__( self) ->Optional[int]:
a_ = WATERMARK_BITS
a_ = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# can't encode images that are smaller than 256
if images.shape[-1] < 2_56:
return images
a_ = (2_55 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1).float().numpy()
a_ = [self.encoder.encode(__UpperCAmelCase , "dwtDct") for image in images]
a_ = torch.from_numpy(np.array(__UpperCAmelCase)).permute(0 , 3 , 1 , 2)
a_ = torch.clamp(2 * (images / 2_55 - 0.5) , min=-1.0 , max=1.0)
return images
| 303 | 1 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
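# Seq2seq QA evaluation generates answers (via the max_length/num_beams gen
# kwargs), so metric computation is disabled inside the raw evaluation loop and
# applied only after post-processing maps generated ids back to answer strings.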
class _UpperCamelCase ( SeqaSeqTrainer ):
'''simple docstring'''
def __init__( self : Dict , *_lowerCAmelCase : str , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Any=None , **_lowerCAmelCase : List[str]):
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase)
__lowercase =eval_examples
__lowercase =post_process_function
def __lowerCamelCase ( self : int , _lowerCAmelCase : Optional[Dataset] = None , _lowerCAmelCase : str=None , _lowerCAmelCase : Optional[List[str]] = None , _lowerCAmelCase : str = "eval" , **_lowerCAmelCase : Any , ):
'''simple docstring'''
__lowercase =gen_kwargs.copy()
__lowercase =(
gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
)
__lowercase =(
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
)
__lowercase =gen_kwargs
__lowercase =self.eval_dataset if eval_dataset is None else eval_dataset
__lowercase =self.get_eval_dataloader(_lowerCAmelCase)
__lowercase =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase =self.compute_metrics
__lowercase =None
__lowercase =time.time()
__lowercase =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase =eval_loop(
_lowerCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
__lowercase =compute_metrics
__lowercase =self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__lowercase =self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
__lowercase =self.compute_metrics(_lowerCAmelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"""{metric_key_prefix}_"""):
__lowercase =metrics.pop(_lowerCAmelCase)
metrics.update(output.metrics)
else:
__lowercase =output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_lowerCAmelCase)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
__lowercase =self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowerCAmelCase)
return metrics
def __lowerCamelCase ( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : str = "test" , **_lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =gen_kwargs.copy()
__lowercase =self.get_test_dataloader(_lowerCAmelCase)
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase =self.compute_metrics
__lowercase =None
__lowercase =time.time()
__lowercase =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase =eval_loop(
_lowerCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowerCAmelCase , metric_key_prefix=_lowerCAmelCase , )
finally:
__lowercase =compute_metrics
__lowercase =self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_lowerCAmelCase , _lowerCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , ))
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowercase =self.post_process_function(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 'predict')
__lowercase =self.compute_metrics(_lowerCAmelCase)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"""{metric_key_prefix}_"""):
__lowercase =metrics.pop(_lowerCAmelCase)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowerCAmelCase)
| 166 |
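The two overrides above share one flow: unset compute_metrics so the inner loop only gathers predictions, restore it, run the post-processing hook, then prefix every metric key. A framework-free sketch of that pattern (all names here are illustrative, not the Trainer API):

import time

class MiniEvaluator:
    def __init__(self, loop_fn, post_process=None, compute_metrics=None):
        self.loop_fn = loop_fn                # runs the model over a dataset
        self.post_process = post_process      # e.g. decode generated token ids
        self.compute_metrics = compute_metrics

    def evaluate(self, dataset, prefix="eval"):
        # Temporarily disable metric computation inside the loop and
        # restore it afterwards, even if the loop raises.
        saved, self.compute_metrics = self.compute_metrics, None
        start = time.time()
        try:
            output = self.loop_fn(dataset)
        finally:
            self.compute_metrics = saved
        metrics = {}
        if self.post_process is not None and self.compute_metrics is not None:
            preds = self.post_process(dataset, output)
            metrics = self.compute_metrics(preds)
            # prefix every key, as the overrides above do
            metrics = {k if k.startswith(f"{prefix}_") else f"{prefix}_{k}": v
                       for k, v in metrics.items()}
        metrics[f"{prefix}_runtime"] = round(time.time() - start, 4)
        return metrics

ev = MiniEvaluator(
    loop_fn=lambda ds: [x * 2 for x in ds],
    post_process=lambda ds, out: out,
    compute_metrics=lambda preds: {"total": sum(preds)},
)
print(ev.evaluate([1, 2, 3]))  # {'eval_total': 12, 'eval_runtime': ...}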
'''simple docstring'''
from __future__ import annotations
def _A ( _lowerCAmelCase ):
"""simple docstring"""
__lowercase =[True] * limit
__lowercase =False
__lowercase =False
__lowercase =True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
__lowercase =i * 2
while index < limit:
__lowercase =False
__lowercase =index + i
__lowercase =[2]
for i in range(3 , _lowerCAmelCase , 2 ):
if is_prime[i]:
primes.append(_lowerCAmelCase )
return primes
def _A ( _lowerCAmelCase = 1_000_000 ):
"""simple docstring"""
__lowercase =prime_sieve(_lowerCAmelCase )
__lowercase =0
__lowercase =0
for i in range(len(_lowerCAmelCase ) ):
for j in range(i + length , len(_lowerCAmelCase ) ):
__lowercase =sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
__lowercase =j - i
__lowercase =sol
return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 166 | 1 |
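The renamed assignments in the sieve above dropped their index targets (is_prime[0], is_prime[1], and so on), so here is a runnable reconstruction of the same Project Euler 50 approach, assuming the usual odd-only sieve bookkeeping; a prefix-sum array would speed up the inner sums but is omitted to stay close to the original:

def prime_sieve(limit: int) -> list:
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(3, int(limit**0.5 + 1), 2):
        if is_prime[i]:
            for index in range(i * i, limit, 2 * i):
                is_prime[index] = False
    return [2] + [i for i in range(3, limit, 2) if is_prime[i]]

def solution(ceiling: int = 1_000_000) -> int:
    # Longest run of consecutive primes whose sum is itself a prime < ceiling.
    primes = prime_sieve(ceiling)
    prime_set = set(primes)          # O(1) membership instead of a list scan
    largest = length = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            total = sum(primes[i:j])
            if total >= ceiling:
                break
            if total in prime_set:
                length = j - i
                largest = total
    return largest

print(solution())  # 997651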
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A : Optional[int] = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
_A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 369 |
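The __init__ above routes everything through _LazyModule so torch, TF, and Flax are only imported when actually used. A minimal sketch of the same idea with PEP 562 module-level __getattr__, using stdlib modules as stand-ins for the heavy backends (illustrative, not the transformers implementation):

# mypkg/__init__.py
import importlib

_import_structure = {
    "math": ["sqrt"],    # pretend these are optional heavy submodules
    "json": ["dumps"],
}
_attr_to_module = {
    attr: mod for mod, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    # Import the owning module only on first attribute access.
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")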
def _a ( UpperCAmelCase ) -> bool:
"""simple docstring"""
return str(UpperCAmelCase ) == str(UpperCAmelCase )[::-1]
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
return int(UpperCAmelCase ) + int(str(UpperCAmelCase )[::-1] )
def _a ( UpperCAmelCase = 10000 ) -> int:
"""simple docstring"""
lowerCamelCase__ : Tuple = []
for num in range(1 , UpperCAmelCase ):
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Union[str, Any] = num
while iterations < 50:
lowerCamelCase__ : Dict = sum_reverse(UpperCAmelCase )
iterations += 1
if is_palindrome(UpperCAmelCase ):
break
else:
lychrel_nums.append(UpperCAmelCase )
return len(UpperCAmelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 265 | 0 |
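The renaming above collapsed the loop's running value, the loop bound, and the result list into one identifier, so as written the snippet no longer computes what it should. A runnable reconstruction of the Project Euler 55 logic, with the standard 50-iteration Lychrel cap:

def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]

def sum_reverse(n: int) -> int:
    return n + int(str(n)[::-1])

def solution(limit: int = 10_000) -> int:
    # Count numbers below `limit` that never reach a palindrome
    # within 50 reverse-and-add iterations.
    lychrel_count = 0
    for num in range(1, limit):
        value = num
        for _ in range(50):
            value = sum_reverse(value)
            if is_palindrome(value):
                break
        else:
            lychrel_count += 1
    return lychrel_count

print(solution())  # 249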
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> int:
if attention_mask is None:
a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__lowerCamelCase )
if decoder_head_mask is None:
a = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowerCamelCase )
if cross_attn_head_mask is None:
a = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __lowerCAmelCase :
def __init__( self :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :int=13 , __magic_name__ :Optional[int]=7 , __magic_name__ :Tuple=True , __magic_name__ :Optional[int]=False , __magic_name__ :List[Any]=99 , __magic_name__ :List[str]=16 , __magic_name__ :Dict=2 , __magic_name__ :List[Any]=4 , __magic_name__ :Dict=4 , __magic_name__ :Optional[Any]="relu" , __magic_name__ :int=0.1 , __magic_name__ :str=0.1 , __magic_name__ :Any=0.0 , __magic_name__ :Optional[int]=0.0 , __magic_name__ :Any=20 , __magic_name__ :Dict=2 , __magic_name__ :Dict=1 , __magic_name__ :Any=0 , ):
'''simple docstring'''
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = encoder_layerdrop
a = decoder_layerdrop
a = max_position_embeddings
a = eos_token_id
a = pad_token_id
a = bos_token_id
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a = self.eos_token_id # Eos Token
a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
a = input_ids.clamp(self.pad_token_id + 1 )
a = decoder_input_ids.clamp(self.pad_token_id + 1 )
a = self.get_config()
a = prepare_mam_aaa_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ )
return config, inputs_dict
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a , a = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self :Tuple , __magic_name__ :List[Any] , __magic_name__ :Any ):
'''simple docstring'''
a = MaMaaaModel(config=__magic_name__ ).get_decoder().to(__magic_name__ ).eval()
a = inputs_dict["""input_ids"""]
a = inputs_dict["""attention_mask"""]
a = inputs_dict["""head_mask"""]
# first forward pass
a = model(__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ , use_cache=__magic_name__ )
a , a = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
a = ids_tensor((self.batch_size, 3) , config.vocab_size )
a = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
a = model(__magic_name__ , attention_mask=__magic_name__ )["""last_hidden_state"""]
a = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
"""last_hidden_state"""
]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -3:, random_slice_idx].detach()
a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-2 ) )
def lowerCamelCase__ ( self :Dict , __magic_name__ :str , __magic_name__ :int ):
'''simple docstring'''
a = MaMaaaModel(config=__magic_name__ ).to(__magic_name__ ).eval()
a = model(**__magic_name__ )
a = outputs.encoder_last_hidden_state
a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a = model.get_encoder()
encoder.save_pretrained(__magic_name__ )
a = MaMaaaEncoder.from_pretrained(__magic_name__ ).to(__magic_name__ )
a = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
a = model.get_decoder()
decoder.save_pretrained(__magic_name__ )
a = MaMaaaDecoder.from_pretrained(__magic_name__ ).to(__magic_name__ )
a = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__magic_name__ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
UpperCamelCase__ = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :List[str] , __magic_name__ :int , __magic_name__ :int , __magic_name__ :int , __magic_name__ :List[str] , __magic_name__ :str ):
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = MaMaaaModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a = model_class(__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
a , a = model_class.from_pretrained(__magic_name__ , output_loading_info=__magic_name__ )
self.assertEqual(info["""missing_keys"""] , [] )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__magic_name__ )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__magic_name__ )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
a = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = copy.deepcopy(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
if not self.is_encoder_decoder:
a = inputs["""input_ids"""]
del inputs["input_ids"]
else:
a = inputs["""input_ids"""]
a = inputs.get("""decoder_input_ids""" , __magic_name__ )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , __magic_name__ )
a = model.get_input_embeddings()
if not self.is_encoder_decoder:
a = wte(__magic_name__ )
else:
a = wte(__magic_name__ )
a = wte(__magic_name__ )
with torch.no_grad():
model(**__magic_name__ )[0]
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs()
a = input_dict["""input_ids"""]
a = input_ids.ne(1 ).to(__magic_name__ )
a = MaMaaaForConditionalGeneration(__magic_name__ ).eval().to(__magic_name__ )
if torch_device == "cuda":
model.half()
model.generate(__magic_name__ , attention_mask=__magic_name__ )
model.generate(num_beams=4 , do_sample=__magic_name__ , early_stopping=__magic_name__ , num_return_sequences=3 )
def __A ( __lowerCamelCase ) -> List[Any]:
return torch.tensor(__lowerCamelCase , dtype=torch.long , device=__lowerCamelCase )
__UpperCamelCase : Dict = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__magic_name__ )
a = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
a = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
a = prepare_mam_aaa_inputs_dict(model.config , __magic_name__ , __magic_name__ )
with torch.no_grad():
a = model(**__magic_name__ )[0]
a = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , __magic_name__ )
# change to expected output here
a = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=__magic_name__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__magic_name__ )
# change to intended input
a = _long_tensor([[12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38, 2]] )
a = _long_tensor([[2, 12_8028, 98, 12, 3_0527, 2732, 159, 7755, 6_1904, 3_9144, 38]] )
a = prepare_mam_aaa_inputs_dict(model.config , __magic_name__ , __magic_name__ )
with torch.no_grad():
a = model(**__magic_name__ )[0]
a = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , __magic_name__ )
# change to expected output here
a = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=__magic_name__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__magic_name__ )
a = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
a = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
a = tokenizer(__magic_name__ , padding=__magic_name__ , return_tensors="""pt""" )
a = model.generate(
input_ids=dct["""input_ids"""].to(__magic_name__ ) , attention_mask=dct["""attention_mask"""].to(__magic_name__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
a = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
a = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__magic_name__ , skip_special_tokens=__magic_name__ )
assert generated == expected_en
| 228 |
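The helper at the top of the test file derives every missing mask from the pad token. That default-mask construction stands alone as a pattern:

import torch

def default_masks(input_ids, decoder_input_ids, pad_token_id):
    # attention is 1 wherever the token is not padding
    attention_mask = input_ids.ne(pad_token_id).long()
    decoder_attention_mask = decoder_input_ids.ne(pad_token_id).long()
    return attention_mask, decoder_attention_mask

ids = torch.tensor([[5, 7, 9, 1, 1]])   # 1 plays the pad id here (assumption)
dec = torch.tensor([[2, 5, 7, 1, 1]])
enc_mask, dec_mask = default_masks(ids, dec, pad_token_id=1)
assert enc_mask.tolist() == [[1, 1, 1, 0, 0]]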
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__UpperCamelCase : Dict = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def __A ( __lowerCamelCase ) -> List[str]:
a = {}
with open(__lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(__lowerCamelCase ):
a = line.strip()
if line:
a = line.split()
a = line_number
a = words[0]
a = value
return result
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
for attribute in key.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
a = PARAM_MAPPING[full_name.split(""".""" )[-1]]
a = """param"""
if weight_type is not None and weight_type != "param":
a = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
a = hf_pointer
for attribute in hf_param_name.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = shape_pointer.shape
# let's reduce dimension
a = value[0]
else:
a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = value
else:
a = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
a = PARAM_MAPPING[full_name.split(""".""" )[-1]]
a = """param"""
if weight_type is not None and weight_type != "param":
a = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a = """.""".join([key, hf_param_name] )
else:
a = key
a = value if """lm_head""" in full_key else value[0]
__UpperCamelCase : List[Any] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None ) -> Optional[Any]:
a = False
for key, mapped_key in MAPPING.items():
a = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
a = True
if "*" in mapped_key:
a = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
a = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
a = """weight_g"""
elif "weight_v" in name:
a = """weight_v"""
elif "bias" in name:
a = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a = """weight"""
else:
a = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return is_used
return is_used
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
a = []
a = fairseq_model.state_dict()
a = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
a = True
else:
a = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
a = full_name.split("""conv_layers.""" )[-1]
a = name.split(""".""" )
a = int(items[0] )
a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=False ) -> List[Any]:
if config_path is not None:
a = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
a = WavaVecaConfig()
if is_seq_class:
a = read_txt_into_dict(__lowerCamelCase )
a = idalabel
a = WavaVecaForSequenceClassification(__lowerCamelCase )
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
a = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a = target_dict.pad_index
a = target_dict.bos_index
a = target_dict.eos_index
a = len(target_dict.symbols )
a = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
a = target_dict.indices
# fairseq has the <pad> and <s> switched
a = 0
a = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
a = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
a = True if config.feat_extract_norm == """layer""" else False
a = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
a = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
a = WavaVecaForCTC(__lowerCamelCase )
else:
a = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
a = argparse.Namespace(task="""audio_pretraining""" )
a = fairseq.tasks.setup_task(__lowerCamelCase )
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
a = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__UpperCamelCase : Union[str, Any] = parser.parse_args()
__UpperCamelCase : Any = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 228 | 1 |
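Most of the conversion script above reduces to one move: translate a fairseq parameter name into a Hugging Face one, where a `*` in the mapping stands for a layer index recovered from the source key. A self-contained sketch of that wildcard rename (the two entries are a toy excerpt, not the full table):

from typing import Optional

MAPPING = {
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
}

def rename_key(name: str) -> Optional[str]:
    for src, dst in MAPPING.items():
        if src in name:
            if "*" in dst:
                # recover the layer index from the source key, mirroring
                # the name.split(key)[0].split(".")[-2] trick above
                layer = name.split(src)[0].split(".")[-2]
                dst = dst.replace("*", layer)
            return dst
    return None

print(rename_key("encoder.layers.3.self_attn.k_proj.weight"))
# encoder.layers.3.attention.k_proj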
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Dict =DebertaVaTokenizer
lowerCamelCase : Optional[Any] =DebertaVaTokenizerFast
lowerCamelCase : List[str] =True
lowerCamelCase : Any =True
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase : List[Any] = DebertaVaTokenizer(lowerCAmelCase , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = """this is a test"""
__lowerCAmelCase : Tuple = """this is a test"""
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = """<pad>"""
__lowerCAmelCase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(lowerCAmelCase ) , 3_00_01 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase : Tuple = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowerCAmelCase : Optional[Any] = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase )
__lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : str = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : int = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = DebertaVaTokenizerFast(lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
"""simple docstring"""
__lowerCAmelCase : Any = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : Optional[int] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Any = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : Union[str, Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase : Optional[Any] = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Optional[int] = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : int = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = """ \tHeLLo!how \n Are yoU? """
__lowerCAmelCase : Any = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowerCAmelCase : Tuple = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Any = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase )
__lowerCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : Tuple = self.get_rust_tokenizer()
__lowerCAmelCase : int = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
__lowerCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Tuple = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
__lowerCAmelCase : int = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
__lowerCAmelCase : Any = tokenizer.encode(lowerCAmelCase )
__lowerCAmelCase : str = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
"""simple docstring"""
__lowerCAmelCase : int = """This is a test"""
__lowerCAmelCase : List[Any] = [13, 1, 43_98, 25, 21, 12_89]
__lowerCAmelCase : Dict = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase : Dict = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowerCAmelCase : Dict = DebertaVaTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
__lowerCAmelCase : Any = DebertaVaTokenizerFast(lowerCAmelCase , keep_accents=lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[str] = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Dict = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# fmt: off
__lowerCAmelCase : List[Any] = """I was born in 92000, and this is falsé."""
__lowerCAmelCase : List[Any] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
__lowerCAmelCase : Union[str, Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowerCAmelCase : str = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowerCAmelCase : Any = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : str = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
__lowerCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = DebertaVaTokenizer(lowerCAmelCase )
__lowerCAmelCase : Tuple = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase : int = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
__lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowerCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , lowerCAmelCase , )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = {"""input_ids""": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 370 |
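Outside the test harness, the slow/fast parity these tests exercise can be checked in a few lines with the public AutoTokenizer API; note that the skipped tests above explicitly warn the two implementations can disagree on some inputs, and the checkpoint must be downloaded first:

from transformers import AutoTokenizer

name = "microsoft/deberta-v2-xlarge"   # the checkpoint the integration test pins
slow = AutoTokenizer.from_pretrained(name, use_fast=False)
fast = AutoTokenizer.from_pretrained(name, use_fast=True)

text = "I was born in 92000, and this is falsé."
print(slow.tokenize(text))
print(fast.tokenize(text))   # may differ where the fast-tokenizer bug applies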
def snake_case_ () -> List[Any]:
for n in range(1 , 1_0_0_0_0_0_0 ):
yield n * (n + 1) // 2
def snake_case_ (__A : Dict ) -> Tuple:
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[int] = 2
while i * i <= n:
__lowerCAmelCase : Optional[int] = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def snake_case_ () -> Dict:
return next(i for i in triangle_number_generator() if count_divisors(__A ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 139 | 0 |
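Since T(n) = n(n + 1) / 2 and gcd(n, n + 1) = 1, the divisor count of a triangle number factors as the product of the divisor counts of its two coprime halves, so each half only needs factoring once. A sketch of that standard speed-up (not used by the snippet above):

def count_divisors(n: int) -> int:
    count, i = 1, 2
    while i * i <= n:
        mult = 0
        while n % i == 0:
            n //= i
            mult += 1
        count *= mult + 1
        i += 1
    if n > 1:
        count *= 2
    return count

def solution() -> int:
    n = 1
    while True:
        # split n(n + 1) / 2 into two coprime factors
        a, b = (n // 2, n + 1) if n % 2 == 0 else (n, (n + 1) // 2)
        if count_divisors(a) * count_divisors(b) > 500:
            return a * b
        n += 1

print(solution())  # 76576500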
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> int:
"""simple docstring"""
UpperCamelCase :Any = prime_factors(SCREAMING_SNAKE_CASE__ )
if is_square_free(SCREAMING_SNAKE_CASE__ ):
return -1 if len(SCREAMING_SNAKE_CASE__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 38 |
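The Möbius snippet imports its helpers from a local maths package; a self-contained sketch with trial-division factorization inlined (adequate for small n):

def prime_factors(n: int) -> list:
    # repeats each prime once per multiplicity, e.g. 12 -> [2, 2, 3]
    factors, i = [], 2
    while i * i <= n:
        while n % i == 0:
            factors.append(i)
            n //= i
        i += 1
    if n > 1:
        factors.append(n)
    return factors

def mobius(n: int) -> int:
    factors = prime_factors(n)
    if len(set(factors)) != len(factors):   # some squared prime divides n
        return 0
    return -1 if len(factors) % 2 else 1

assert [mobius(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]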
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
snake_case_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __a (lowerCamelCase ):
def __init__( self : str , __magic_name__ : CLIPSegForImageSegmentation , __magic_name__ : CLIPSegProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> str:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ : Dict = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                '''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , __magic_name__ , standard_warn=__magic_name__ )
UpperCAmelCase_ : Optional[int] = dict(scheduler.config )
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : List[str] = FrozenDict(__magic_name__ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ : Dict = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , __magic_name__ , standard_warn=__magic_name__ )
UpperCAmelCase_ : Dict = dict(scheduler.config )
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Tuple = FrozenDict(__magic_name__ )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=__magic_name__ , segmentation_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> List[str]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase_ : Tuple = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__magic_name__ , __magic_name__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__magic_name__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Union[str, Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : Union[torch.FloatTensor, PIL.Image.Image] , __magic_name__ : str , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
UpperCAmelCase_ : int = self.segmentation_model(**__magic_name__ )
UpperCAmelCase_ : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(__magic_name__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase_ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , )
| 125 | 0 |
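The one step this pipeline adds over plain inpainting is turning CLIPSeg logits into a PIL mask. A hedged sketch of just that step, assuming the public CIDAS/clipseg-rd64-refined checkpoint (the pipeline itself is checkpoint-agnostic):

import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.new("RGB", (512, 512), "white")   # placeholder input image
inputs = processor(text=["a cat"], images=[image], padding="max_length", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# sigmoid -> [0, 1], scale to uint8, resize the mask back to the image size
mask = torch.sigmoid(logits).squeeze().numpy()
mask_image = Image.fromarray((mask * 255).astype("uint8")).resize(image.size)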
from collections.abc import Iterable
from typing import Generic, TypeVar
a : Dict = TypeVar('_T')
class _a ( Generic[_T] ):
def __init__(self, SCREAMING_SNAKE_CASE_ = None ) -> None:
UpperCAmelCase_: list[_T] = list(iterable or [] )
UpperCAmelCase_: list[_T] = []
def __len__(self ) -> int:
return len(self._stacka ) + len(self._stacka )
def __repr__(self ) -> str:
return f'Queue({tuple(self._stacka[::-1] + self._stacka )})'
def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> None:
self._stacka.append(SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> _T:
UpperCAmelCase_: Optional[int] = self._stacka.pop
UpperCAmelCase_: List[Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 82 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a : Optional[Any] = logging.get_logger(__name__)
a : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a : Dict = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a : str = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
a : Optional[int] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_INIT_CONFIGURATION
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = SqueezeBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        # Keep the backend normalizer in sync with the requested casing options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""", do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
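# Token layout produced by the two helpers above (standard BERT-style scheme):
#   single sequence : [CLS] A [SEP]          -> token_type_ids are all 0
#   sequence pair   : [CLS] A [SEP] B [SEP]  -> 0s cover "[CLS] A [SEP]", 1s cover "B [SEP]"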
| 82 | 1 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[int] , _A : List[Any] , _A : List[Any]=2 , _A : Any=3 , _A : Union[str, Any]=4 , _A : int=2 , _A : Dict=7 , _A : Dict=True , _A : int=True , _A : List[str]=True , _A : Tuple=True , _A : Tuple=99 , _A : Any=36 , _A : List[Any]=3 , _A : Optional[int]=4 , _A : Any=37 , _A : int="gelu" , _A : int=0.1 , _A : Tuple=0.1 , _A : Dict=512 , _A : Dict=16 , _A : Optional[Any]=2 , _A : List[str]=0.02 , _A : Any=6 , _A : Any=6 , _A : Optional[Any]=3 , _A : str=4 , _A : Optional[Any]=None , _A : List[str]=1000 , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : List[str] = batch_size
__SCREAMING_SNAKE_CASE : Tuple = num_channels
__SCREAMING_SNAKE_CASE : List[str] = image_size
__SCREAMING_SNAKE_CASE : Any = patch_size
__SCREAMING_SNAKE_CASE : Dict = text_seq_length
__SCREAMING_SNAKE_CASE : Optional[int] = is_training
__SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
__SCREAMING_SNAKE_CASE : str = use_token_type_ids
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Tuple = vocab_size
__SCREAMING_SNAKE_CASE : Any = hidden_size
__SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Any = num_attention_heads
__SCREAMING_SNAKE_CASE : Dict = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : str = type_vocab_size
__SCREAMING_SNAKE_CASE : List[str] = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
__SCREAMING_SNAKE_CASE : Optional[int] = coordinate_size
__SCREAMING_SNAKE_CASE : Any = shape_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
__SCREAMING_SNAKE_CASE : str = num_choices
__SCREAMING_SNAKE_CASE : Tuple = scope
__SCREAMING_SNAKE_CASE : Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__SCREAMING_SNAKE_CASE : Dict = text_seq_length
__SCREAMING_SNAKE_CASE : str = (image_size // patch_size) ** 2 + 1
__SCREAMING_SNAKE_CASE : List[Any] = self.text_seq_length + self.image_seq_length
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
__SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : Union[str, Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__SCREAMING_SNAKE_CASE : Dict = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Optional[int] , _A : List[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : Tuple , _A : List[str] , _A : str , _A : Union[str, Any] , _A : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = LayoutLMvaModel(config=_A )
model.to(_A )
model.eval()
# text + image
__SCREAMING_SNAKE_CASE : Dict = model(_A , pixel_values=_A )
__SCREAMING_SNAKE_CASE : Dict = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A )
__SCREAMING_SNAKE_CASE : Tuple = model(_A , bbox=_A , pixel_values=_A , token_type_ids=_A )
__SCREAMING_SNAKE_CASE : int = model(_A , bbox=_A , pixel_values=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__SCREAMING_SNAKE_CASE : List[str] = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : str , _A : str , _A : str , _A : Union[str, Any] , _A : Dict , _A : Union[str, Any] , _A : List[Any] , _A : List[str] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[int] = LayoutLMvaForSequenceClassification(_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : str , _A : Dict , _A : Tuple , _A : Optional[int] , _A : Tuple , _A : Optional[int] , _A : Optional[int] , _A : int , _A : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = self.num_labels
__SCREAMING_SNAKE_CASE : Any = LayoutLMvaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : str , _A : Dict , _A : Union[str, Any] , _A : List[str] , _A : int , _A : str , _A : int , _A : List[Any] , _A : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = LayoutLMvaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self : Dict , _A : Tuple , _A : List[str] , _A : List[str] , _A : Optional[int] , _A : int ):
"""simple docstring"""
return True
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
        self.model_tester = LayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def UpperCAmelCase__ ( self : Any , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=False ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
__SCREAMING_SNAKE_CASE : List[Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_A , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
__SCREAMING_SNAKE_CASE : Dict = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_A )
elif model_class in get_values(_A ):
__SCREAMING_SNAKE_CASE : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
elif model_class in [
*get_values(_A ),
]:
__SCREAMING_SNAKE_CASE : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
elif model_class in [
*get_values(_A ),
]:
__SCREAMING_SNAKE_CASE : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_A , )
return inputs_dict
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__SCREAMING_SNAKE_CASE : Dict = type
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@slow
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(_A )
__SCREAMING_SNAKE_CASE : Any = self.default_image_processor
__SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img()
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(images=_A , return_tensors='''pt''' ).pixel_values.to(_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[1, 2]] )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__SCREAMING_SNAKE_CASE : Any = model(
input_ids=input_ids.to(_A ) , bbox=bbox.to(_A ) , pixel_values=pixel_values.to(_A ) , )
# verify the logits
__SCREAMING_SNAKE_CASE : Any = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , _A )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(_A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
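# Note on the inputs exercised above: each bbox is (x0, y0, x1, y1) with coordinates
# in [0, range_bbox), and the model's sequence length equals
# text tokens + image patches + 1 (the CLS token), as computed in the tester's __init__.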
| 303 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
lowercase_ = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
lowercase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __UpperCamelCase :
"""simple docstring"""
    def __init__( self ):
        """simple docstring"""
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('''bits''' , self.watermark )

    def apply_watermark( self , images: torch.FloatTensor ):
        """simple docstring"""
        # Images narrower than 256 pixels cannot hold the watermark.
        if images.shape[-1] < 256:
            return images
        # [-1, 1] NCHW tensors -> 0-255 NHWC numpy arrays for the encoder.
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , '''dwtDct''' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        # Rescale back to [-1, 1] so downstream post-processing is unchanged.
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
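# Usage sketch (illustrative; `images` is assumed to be a [-1, 1] NCHW float batch,
# e.g. a pipeline's decoded VAE output):
#
#   watermarker = __UpperCamelCase()               # the watermark class defined above
#   images = watermarker.apply_watermark(images)   # embeds an invisible DWT-DCT watermark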
| 303 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self ):
"""simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_lowerCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase = """A red cat sitting on a park bench"""
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type="""np""" , )
_lowerCAmelCase = output.images
_lowerCAmelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
_lowerCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase = """A red cat sitting on a park bench"""
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type="""np""" , )
_lowerCAmelCase = output.images
_lowerCAmelCase = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 229 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = 13
_lowerCAmelCase = 7
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = 99
_lowerCAmelCase = 384
_lowerCAmelCase = 2
_lowerCAmelCase = 4
_lowerCAmelCase = 37
_lowerCAmelCase = """gelu"""
_lowerCAmelCase = 0.1
_lowerCAmelCase = 0.1
_lowerCAmelCase = 512
_lowerCAmelCase = 16
_lowerCAmelCase = 2
_lowerCAmelCase = 0.02
_lowerCAmelCase = 3
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = 2
_lowerCAmelCase = 9
_lowerCAmelCase = 1
_lowerCAmelCase = None
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel(config=_lowercase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = [input_ids, input_mask]
_lowerCAmelCase = model(_lowercase )
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertForMaskedLM(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFConvBertForSequenceClassification(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = TFConvBertForMultipleChoice(config=_lowercase )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFConvBertForTokenClassification(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertForQuestionAnswering(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowercase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowercase : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowercase : Optional[Any] = False
_lowercase : Dict = False
_lowercase : Any = False
def _lowercase ( self ):
"""simple docstring"""
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def _lowercase ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , """use_cache""" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )
for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , """saved_model""" , """1""" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["""encoder_hidden_states"""]
                    output_attentions = outputs["""encoder_attentions"""]
                else:
                    output_hidden_states = outputs["""hidden_states"""]
                    output_attentions = outputs["""attentions"""]
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(_lowercase )
def _lowercase ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , """key_length""" , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , """key_length""" , encoder_seq_length )

        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
_lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase = model(_lowercase )[0]
_lowerCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , _lowercase )
_lowerCAmelCase = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1e-4 )
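# The 3x3 logits slice above pins down the pretrained checkpoint's numerics;
# atol=1e-4 leaves headroom for framework- and kernel-level float differences.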
| 229 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_llama"""] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
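# Minimal sketch of the lazy-import pattern used above (illustrative only, not the
# actual transformers `_LazyModule`): attribute access triggers the real import,
# so `import <package>` stays cheap until a symbol such as `LlamaModel` is touched.
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map each exported symbol to the submodule that defines it
#           self._origin = {sym: mod for mod, syms in import_structure.items() for sym in syms}
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._origin[attr], self.__name__)
#           return getattr(module, attr)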
| 78 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a : List[str] = 2_5_0_0_0_4
a : List[str] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = MBartTokenizer
lowercase = MBartTokenizerFast
lowercase = True
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase( self ) -> int:
UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A )
UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def _lowercase( self ) -> Union[str, Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A )
UpperCAmelCase : int = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase : int = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A )
UpperCAmelCase : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
lowercase = 'facebook/mbart-large-en-ro'
lowercase = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE]
@classmethod
    def setUpClass( cls ):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        cls.pad_token_id = 1
        return cls
def _lowercase( self ) -> Union[str, Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def _lowercase( self ) -> List[str]:
self.assertIn(A , self.tokenizer.all_special_ids )
UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A )
UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
UpperCAmelCase : int = 10
UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A )
self.assertEqual(len(A ) , A )
def _lowercase( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = tempfile.mkdtemp()
UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase : Dict = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase : Dict = targets["""input_ids"""]
UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3034, 2, 250004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
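# Worked example of `shift_tokens_right` as exercised above (values illustrative):
# MBart labels end with [..., </s>, lang_code]; the shift rotates the language code
# to the front so the decoder starts conditioned on the target language:
#   labels:            [9019, 96, ..., 2, 250020]   # 2 = </s>, 250020 = ro_RO
#   decoder_input_ids: [250020, 9019, 96, ..., 2]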
| 265 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a_ ( __lowercase ):
'''simple docstring'''
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(_a , 'width_multiplier'))
class a_ :
'''simple docstring'''
def __init__( self : Dict , lowercase__ : Optional[int] , lowercase__ : Union[str, Any]=13 , lowercase__ : Optional[int]=64 , lowercase__ : Dict=2 , lowercase__ : str=3 , lowercase__ : Any="swish" , lowercase__ : Dict=3 , lowercase__ : Optional[int]=32 , lowercase__ : int=0.1 , lowercase__ : List[str]=0.02 , lowercase__ : Union[str, Any]=True , lowercase__ : str=True , lowercase__ : Dict=10 , lowercase__ : List[str]=None , lowercase__ : Optional[Any]=0.25 , lowercase__ : Optional[int]=0.0 , lowercase__ : List[str]=0.0 , ):
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = make_divisible(512 * width_multiplier , divisor=8)
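        # make_divisible rounds 512 * width_multiplier to a multiple of 8, matching
        # how MobileViTV2 keeps channel widths hardware-friendly.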
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = conv_kernel_size
lowerCAmelCase__ = output_stride
lowerCAmelCase__ = classifier_dropout_prob
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = width_multiplier
lowerCAmelCase__ = ffn_dropout
lowerCAmelCase__ = attn_dropout
def __snake_case ( self : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels)
lowerCAmelCase__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def __snake_case ( self : Any):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __snake_case ( self : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Dict , lowercase__ : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = MobileViTVaModel(config=_a)
model.to(_a)
model.eval()
lowerCAmelCase__ = model(_a)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self : int , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = MobileViTVaForImageClassification(_a)
model.to(_a)
model.eval()
lowerCAmelCase__ = model(_a , labels=_a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self : str , lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : int):
'''simple docstring'''
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = MobileViTVaForSemanticSegmentation(_a)
model.to(_a)
model.eval()
lowerCAmelCase__ = model(_a)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase__ = model(_a , labels=_a)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case ( self : Any):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase_ = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def __snake_case ( self : List[str]):
'''simple docstring'''
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False)
def __snake_case ( self : Tuple):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
def __snake_case ( self : Tuple):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
def __snake_case ( self : List[str]):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not output attentions')
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
def __snake_case ( self : List[str]):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __snake_case ( self : Any):
'''simple docstring'''
pass
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def __snake_case ( self : Dict):
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
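# Illustrative recap (not part of the original test file): outside the test
# harness, the same post-processing step looks like this, assuming the
# checkpoint above is available locally or on the Hub.
#
#   model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
#   image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
#   inputs = image_processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   seg_map = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(50, 60)])[0]
#   print(seg_map.shape)  # torch.Size([50, 60])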
| 365 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
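# Note (illustrative, not from the original script): padding to a multiple of 8
# under fp16/bf16 lets NVIDIA tensor cores engage efficiently. For example, if
# the longest sequence in a batch has 61 tokens, `pad_to_multiple_of=8` pads
# every sequence in that batch to length 64 instead of 61.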
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`
            # (see the sketch after this function):
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
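# A minimal sketch (not part of the original script) of the `gather_for_metrics`
# shortcut referenced in the evaluation loop above: it truncates the duplicated
# samples of the final distributed batch automatically, so the manual
# `samples_seen` bookkeeping disappears.
#
#   for batch in eval_dataloader:
#       with torch.no_grad():
#           outputs = model(**batch)
#       predictions = outputs.logits.argmax(dim=-1)
#       predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#       metric.add_batch(predictions=predictions, references=references)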
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 119 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so that the shortest edge matches `size["shortest_edge"]`, keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
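# Illustrative usage sketch (not part of the original module). The restored
# class name `CLIPImageProcessor` is an assumption inferred from the
# OPENAI_CLIP_MEAN/OPENAI_CLIP_STD defaults and the `do_convert_rgb` flag.
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults above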
| 138 |
def is_isogram(string: str) -> bool:
    """Return True if no letter repeats in `string` (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
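# Example behaviour (quick sanity checks added for illustration):
#   is_isogram("Uncopyrightable")  ->  True
#   is_isogram("allowance")        ->  False  (the letter 'l' repeats)
#   is_isogram("Alpha")            ->  False  (case-insensitive: 'A' and 'a' repeat)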
| 139 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
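# Minimal sketch of a concrete subclass (illustrative only; the real readers
# live in datasets.io.csv / .json / .parquet and are considerably more involved):
#
#   class InMemoryReader(AbstractDatasetReader):
#       def read(self):
#           return Dataset.from_dict({"text": ["hello", "world"]})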
| 275 |
import math


def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point >= sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Approximate sqrt(a) with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
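# Worked example (added for illustration): approximating sqrt(10). The Newton
# update x_{k+1} = x_k - (x_k^2 - a) / (2 * x_k) converges roughly
# quadratically once it is close to the root:
#   square_root_iterative(10)  ->  3.1622776601683795  (~ math.sqrt(10))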
| 275 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
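# Illustrative sketch (not in the original module): wrapping two ControlNets and
# calling them with per-net conditioning images and scales. The checkpoint paths
# and the `canny_image` / `pose_image` tensors are placeholders.
#
#   controlnet_canny = ControlNetModel.from_pretrained("path/to/controlnet-canny")
#   controlnet_pose = ControlNetModel.from_pretrained("path/to/controlnet-pose")
#   multi = MultiControlNetModel([controlnet_canny, controlnet_pose])
#   down_res, mid_res = multi(
#       sample, timestep, encoder_hidden_states,
#       controlnet_cond=[canny_image, pose_image],
#       conditioning_scale=[1.0, 0.8],
#   )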
| 82 |
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n_str = str(n)
    return n_str == n_str[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum every number below `limit` that is palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
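# Example (illustrative, not in the original solution): 585 is counted because
# it reads the same both ways in base 10, and bin(585) == "0b1001001001", which
# is also a palindrome once the "0b" prefix is stripped off.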
| 82 | 1 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of `n` via trial division (Project Euler 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 352 |
from jiwer import compute_measures
import datasets
lowercase_ = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
lowercase_ = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
lowercase_ = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
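# Quick arithmetic check (added for illustration, using the docstring example):
# "this is the prediction" vs "this is the reference" gives 1 substitution and
# 3 hits; "there is an other sample" vs "there is another one" gives
# 2 substitutions, 1 insertion and 2 hits under jiwer's alignment, so
# WER = (1 + 2 + 1) / (1 + 3 + 2 + 2) = 4 / 8 = 0.5 -- matching the 0.5 above.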
| 282 | 0 |
'''simple docstring'''
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
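# Illustrative usage (not part of the original file): the config simply records
# the encoder/decoder hyper-parameters of the BertAbs summarizer.
#
#   config = BertAbsConfig(dec_layers=8)
#   print(config.model_type)   # "bertabs"
#   print(config.dec_layers)   # 8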
| 229 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
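# Minimal usage sketch (added for illustration; assumes the public
# microsoft/resnet-50 checkpoint referenced in the docstrings above and a PIL
# image bound to `image`):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits          # shape (1, 1000)
#   print(model.config.id2label[logits.argmax(-1).item()])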
| 229 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
_snake_case = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
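# Note on the lazy-import pattern above (illustrative, not in the original
# file): _LazyModule defers the heavy torch-dependent imports until an
# attribute is first accessed.
#
#   import transformers.models.clap as clap  # cheap: nothing heavy imported yet
#   model_cls = clap.ClapModel                # first access triggers the real import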
| 201 |
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
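# Expected output of the demo above (added for illustration) -- the six
# 2-element combinations of 1..4, one per line:
#   1 2
#   1 3
#   1 4
#   2 3
#   2 4
#   3 4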
| 201 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def lowerCamelCase__ ( A__ : List[str] ):
'''simple docstring'''
__lowerCamelCase = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mae""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split )
        ds["""train"""] = split["""train"""]
        ds["""validation"""] = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
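    # With the default mask_ratio of 0.75, three quarters of the image patches are
    # masked out during pre-training, matching the ratio used in the original MAE paper.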
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__lowerCamelCase = image_processor.size["""shortest_edge"""]
else:
__lowerCamelCase = (image_processor.size["""height"""], image_processor.size["""width"""])
__lowerCamelCase = Compose(
[
Lambda(lambda A__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(A__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(A__ : Optional[int] ):
__lowerCamelCase = [transforms(A__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
__lowerCamelCase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(A__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
ds["validation"].set_transform(A__ )
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
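    # Worked example of the scaling rule above: per-device batch 32 with
    # gradient_accumulation_steps=2 on 4 processes gives total_train_batch_size = 256,
    # so the absolute learning rate equals base_learning_rate (1e-3 by default);
    # halving the total batch size would halve the learning rate.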
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """tasks""": """masked-auto-encoding""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-auto-encoding"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    '''simple docstring'''
main()
if __name__ == "__main__":
main()
| 12 |
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph, source_vertex ) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search(self ) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path(self, target_vertex ) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F"""->{target_vertex}"""
if __name__ == "__main__":
    g = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
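    # Expected behaviour for the adjacency lists above (BFS tree rooted at "G"):
    # the first two prints yield "G->C->A->B->D" and "G", while the third raises
    # ValueError, since "Foo" is never reached by the search.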
| 119 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class a :
def __init__( self , __magic_name__ = None , __magic_name__ = [] ) -> Any:
_a = 0
_a = choices
_a = prompt
if sys.platform == "win32":
_a = '*'
else:
_a = '➔ '
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = "" ) -> List[str]:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __magic_name__ )
else:
forceWrite(self.choices[index] , __magic_name__ )
    def print_choice( self , index ) -> Any:
        if index == self.position:
            forceWrite(f' {self.arrow_char} ' )
            self.write_choice(index )
        else:
            forceWrite(f'    {self.choices[index]}' )
        reset_cursor()
    def move_direction( self , direction , num_spaces = 1 ) -> Optional[Any]:
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
    @input.mark(KEYMAP['up'] )
    def move_up(self ) -> Any:
        self.move_direction(Direction.UP )
    @input.mark(KEYMAP['down'] )
    def move_down(self ) -> Any:
        self.move_direction(Direction.DOWN )
    @input.mark(KEYMAP['newline'] )
    def select(self ) -> Tuple:
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        return self.position
    @input.mark(KEYMAP['interrupt'] )
    def interrupt(self ) -> int:
        move_cursor(len(self.choices ) - self.position , 'DOWN' )
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row(self ) -> Union[str, Any]:
        index = int(chr(self.current_selection ) )
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
            else:
                return
        else:
            return
    def run(self , default_choice = 0 ) -> Union[str, Any]:
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , '\n' )
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite('\n' )
        move_cursor(len(self.choices ) - self.position , 'UP' )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , 'UP' )
                        clear_line()
                    self.write_choice(choice , '\n' )
                    return choice
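# Minimal usage sketch (illustrative values; `handle_input` and the key bindings are
# wired up by the `@input.register` machinery above, so this only works inside that
# framework):
#   menu = BulletMenu("Which mixed precision?", ["no", "fp16", "bf16"])
#   chosen_index = menu.run(default_choice=0)  # blocks until the user selects a row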
| 104 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self ) -> List[str]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ) -> Union[str, Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self ) -> Optional[Any]:
        config = self.get_config()
        config.vocab_size = 3_00
        return config
    def check_loss_output(self , result ) -> Dict:
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[int]:
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self ) -> List[str]:
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config(self ) -> Tuple:
        self.config_tester.run_common_tests()
    def test_deberta_model(self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification(self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm(self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification(self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ) -> Optional[Any]:
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest( unittest.TestCase ):
    @unittest.skip(reason='Model not available yet' )
    def test_inference_masked_lm(self ) -> Dict:
        pass
    @slow
    def test_inference_no_head(self ) -> int:
        model = DebertaModel.from_pretrained('microsoft/deberta-base' )
        input_ids = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
| 104 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) ->None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            name_or_path = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
        unk_token = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '''<pad>''' if pad_token is None else pad_token
            bos_token = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"""[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )
    def __getstate__( self ) ->Dict:
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) ->Tuple:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ) ->int:
        '''simple docstring'''
        return len(self.sp_model )
    def preprocess_text( self , text ) ->str:
        '''simple docstring'''
        text = self.non_printing_characters_re.sub('''''' , text )
        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''' , text )
        return text
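    # Note: the regex built in __init__ strips control characters plus U+00A0, U+00AD
    # (soft hyphen) and U+200B (zero-width space); in the upstream tokenizer the
    # `self.whitespaces` set holds various exotic unicode space characters that get
    # collapsed to a plain " " before NFC normalization.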
    def _tokenize( self , text , **kwargs ) ->List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) ->int:
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) ->str:
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization( out_string ) ->str:
        '''simple docstring'''
        return out_string
    def convert_tokens_to_string( self , tokens ) ->str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ) ->Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory , filename_prefix = None ) ->Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def UpperCamelCase__ ( self , A_ , A_ = False ) ->Union[List[int], List[List[int]], "torch.Tensor"]:
'''simple docstring'''
if isinstance(A_ , A_ ):
__lowerCAmelCase : Optional[Any] = self.preprocess_text(A_ )
__lowerCAmelCase : Dict = self.sp_model.encode(A_ )
else:
__lowerCAmelCase : Dict = [self.preprocess_text(A_ ) for t in text]
__lowerCAmelCase : Optional[int] = self.sp_model.encode(A_ )
if return_tensors is True or return_tensors == "pt":
__lowerCAmelCase : Tuple = torch.tensor(A_ )
return token_ids
def UpperCamelCase__ ( self , A_ ) ->str:
'''simple docstring'''
return self.sp_model.decode(A_ )
    def _build_conversation_input_ids( self , conversation ) ->List[int]:
        '''simple docstring'''
        all_responses = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(all_responses ) + f"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt )
| 275 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
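# A typical invocation, as a sketch (the filename is illustrative; `--with_tracking`
# and `--mixed_precision` are the flags defined in `main()` below):
#   accelerate launch tracking.py --with_tracking --mixed_precision fp16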
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ) -> List[str]:
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ) -> str:
    """simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
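    # e.g. a requested batch_size of 64 becomes 4 accumulation steps of 16 samples each
    # on GPU (64 // MAX_GPU_BATCH_SIZE), keeping the effective batch size at 64.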
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split("." )[0]
        accelerator.init_trackers(run , config )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader ),
                    "epoch": epoch,
                } , step=epoch , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def main() -> List[str]:
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 67 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_lowerCamelCase = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio" , label_column="label")
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 67 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig( PretrainedConfig ):
    model_type = """data2vec-audio"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ) -> Tuple:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self ) -> int:
        return math.prod(self.conv_stride )
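# Note: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio
# is 5 * 2**6 = 320, i.e. one encoder frame per 320 raw audio samples (20 ms at 16 kHz).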
| 105 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def get_week_day(year: int , month: int , day: int ) -> str:
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
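# Worked example: get_week_day(2023, 1, 1)
#   century = 20 -> century_anchor = (5 * (20 % 4) + 2) % 7 = 2
#   centurian = 23, centurian_m = 11
#   dooms_day = (1 + 11 + 2 + 2) % 7 = 2   (the doomsday of 2023 falls on a Tuesday)
#   day_anchor = DOOMSDAY_NOT_LEAP[0] = 3  (2023 is not a leap year)
#   week_day = (2 + 1 - 3) % 7 = 0 -> "Sunday"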
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer(self ) -> Dict:
        """simple docstring"""
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
    def get_tokenizer(self , **kwargs ) -> PegasusTokenizer:
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ) -> Any:
        """simple docstring"""
        return ("This is a test", "This is a test")
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(_snake_case ) , 1103 )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
UpperCAmelCase = rust_tokenizer([raw_input_str] , return_tensors=_snake_case , add_special_tokens=_snake_case ).input_ids[0]
UpperCAmelCase = py_tokenizer([raw_input_str] , return_tensors=_snake_case , add_special_tokens=_snake_case ).input_ids[0]
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
UpperCAmelCase = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=_snake_case ).input_ids[0]
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
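        # i.e. the first `offset` (103) ids are reserved for pad/eos/mask/unk-extension
        # tokens, so raw sentencepiece ids are shifted up by 103 (sp unk id 2 -> 105).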
UpperCAmelCase = '''To ensure a smooth flow of bank resolutions.'''
UpperCAmelCase = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
UpperCAmelCase = tokenizer([raw_input_str] , return_tensors=_snake_case ).input_ids[0]
self.assertListEqual(_snake_case , _snake_case )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = ['''This is going to be way too long.''' * 150, '''short example''']
UpperCAmelCase = ['''not super long but more than 5 tokens''', '''tiny''']
UpperCAmelCase = self._large_tokenizer(_snake_case , padding=_snake_case , truncation=_snake_case , return_tensors='''pt''' )
UpperCAmelCase = self._large_tokenizer(
text_target=_snake_case , max_length=5 , padding=_snake_case , truncation=_snake_case , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_snake_case ) == 2 # input_ids, attention_mask.
@slow
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
# fmt: off
UpperCAmelCase = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing; SAMPLE_VOCAB is the fixture
        # path defined near the top of this file.
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self) -> None:
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self) -> None:
        hypothesis_input = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(hypothesis_input).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 152 | 0 |
def greatest_common_divisor(a: int, b: int) -> int:
    # Recursive Euclidean algorithm: gcd(0, b) == |b|.
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # when y == 0 the loop terminates and x holds the final GCD
        x, y = y, x % y
    return abs(x)
def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
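
# Quick sanity sketch (illustrative values, not part of the original script):
# both implementations agree on small inputs, e.g.
#   greatest_common_divisor(54, 24) == 6
#   gcd_by_iterative(54, 24) == 6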
| 201 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
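
# Illustrative call shape (file names and column index are assumptions, not from
# this script); the returned datasets feed TFTrainer in main() below:
#
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv",
#       eval_file="dev.csv",
#       test_file=None,
#       tokenizer=tokenizer,
#       label_column_id=0,
#       max_seq_length=128,
#   )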
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main() -> None:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)

        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)
return results
if __name__ == "__main__":
main()
| 201 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version of one file using a pattern from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
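
# Illustrative walk-through (the version strings are assumptions, not repo state):
# on a dev tree where src/diffusers/__init__.py contains
#     __version__ = "0.20.0.dev0"
# running this script proposes 0.20.0 and rewrites setup.py, the __init__, and
# every examples/*.py check_min_version; running it again with --post_release
# then bumps the tree to 0.21.0.dev0.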
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 61 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
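
# Minimal usage sketch (defaults only, nothing is downloaded; values match the
# defaults defined above):
#   config = Pix2StructConfig()
#   assert config.text_config.num_layers == 12
#   assert config.vision_config.seq_len == 4096
#   serialized = config.to_dict()  # sub-configs are expanded into plain dicts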
| 61 | 1 |
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """Adjust contrast of `img`; `level` is expected in [-255, 255]."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Map each pixel value toward/away from the 128 midpoint.
        return int(128 + factor * (c - 128))

    return img.point(contrast)
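
# Behavior sketch (values computed from the formula above, not from the source):
# level=0 gives factor=1.0 (identity); level=128 gives factor≈2.97, stretching
# pixel values away from the 128 midpoint; negative levels compress toward it.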
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
lowerCAmelCase__ = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 104 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]

    return out_tensor.tolist()
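
# Illustrative right-padding call (toy data, not from the original collator):
#   padding_tensor([[1, 2], [3]], padding_value=-1, padding_side="right", sequence_length=4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]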
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
| 104 | 1 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _snake_case ( lowercase__ , lowercase__=0.9_9_9 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_lowerCamelCase : List[str] = []
for i in range(__snake_case ):
_lowerCamelCase : Tuple = i / num_diffusion_timesteps
_lowerCamelCase : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
return torch.tensor(__snake_case , dtype=torch.floataa )
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    # Second-order (DPM-Solver-2 style) discrete scheduler with interpolated sigmas.
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
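
# Smoke-test sketch (assumed defaults, CPU only; not part of the original module):
#   sched = KDPM2DiscreteScheduler()
#   sched.set_timesteps(10, device="cpu")
#   assert len(sched.timesteps) == 2 * 10 - 1  # first/second-order steps interleaved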
| 363 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items in decreasing value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
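
    # Illustrative demo with assumed values (not from the original source):
    # ratios are 6, 5, 4, so items 0 and 1 are taken whole and item 2 at 2/3.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # -> (240.0, [1, 1, 0.666...])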
| 12 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 67 |
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].`"
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 67 | 1 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams of each other."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings, increment the count for the first
    # string and decrement it for the second; anagrams cancel out exactly.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
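
# Examples (illustrative, mirroring the checks above):
#   check_anagrams("Silent", "Listen")     -> True   (case-insensitive)
#   check_anagrams("new york", "york new") -> True   (whitespace ignored)
#   check_anagrams("There", "Their")       -> False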
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 93 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
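
# Quick sketch (defaults only, nothing is downloaded; values match the defaults above):
#   config = LxmertConfig()
#   assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}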
| 93 | 1 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark the decorated function with the key code it handles."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: str):
    """Mark the decorated function with several key codes it handles."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it has a registered handler."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
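
# Usage sketch (the "down" entry is an assumption; KEYMAP supplies the actual codes):
#
#   @register
#   class Menu:
#       @mark(KEYMAP["down"])  # assumes KEYMAP defines a "down" key code
#       def move_down(cls):
#           ...
#
#   Menu.handle_input() reads one key press and dispatches it to the marked method.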
| 58 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case_ = """uclanlp/plbart-python-en_XX"""
snake_case_ = [
"""def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""",
"""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""",
]
snake_case_ = [
"""Returns the maximum value of a b c.""",
"""Sums the values of a b c.""",
]
snake_case_ = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def __magic_name__ ( cls : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : PLBartTokenizer =PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
SCREAMING_SNAKE_CASE__ : int =1
return cls
def __magic_name__ ( self : int ) -> Tuple:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_00_03 )
def __magic_name__ ( self : str ) -> Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowercase )
def __magic_name__ ( self : List[str] ) -> Tuple:
self.assertIn(__lowercase , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ : Tuple =[EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
SCREAMING_SNAKE_CASE__ : int =self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase )
self.assertEqual(__lowercase , __lowercase )
self.assertNotIn(self.tokenizer.eos_token , __lowercase )
def __magic_name__ ( self : List[str] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple =['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , __lowercase )
SCREAMING_SNAKE_CASE__ : Tuple =10
SCREAMING_SNAKE_CASE__ : Tuple =self.tokenizer(__lowercase , max_length=__lowercase , truncation=__lowercase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __lowercase )
self.assertEqual(len(__lowercase ) , __lowercase )
def __magic_name__ ( self : Any ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_00_04, 5_00_01] )
def __magic_name__ ( self : str ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowercase )
SCREAMING_SNAKE_CASE__ : Any =PLBartTokenizer.from_pretrained(__lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowercase )
@require_torch
def __magic_name__ ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ : List[str] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : str =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __lowercase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict =self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Dict =shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] =batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __magic_name__ ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.tokenizer(self.src_text , padding=__lowercase , truncation=__lowercase , max_length=3 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.tokenizer(
text_target=self.tgt_text , padding=__lowercase , truncation=__lowercase , max_length=10 , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Dict =targets['''input_ids''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] =shift_tokens_right(__lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __magic_name__ ( self : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ : str =self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(__lowercase ) , {
# A, test, EOS, en_XX
'''input_ids''': [[1_50, 2_42, 2, 5_00_03]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_00_01,
} , )
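# Illustrative sketch, not the library implementation: PLBart builds decoder
# inputs mBART-style by wrapping the trailing language-code token to the front,
# so [tok_1, ..., tok_n, eos, lang_code] becomes [lang_code, tok_1, ..., tok_n, eos].
# The helper below is a hypothetical, padding-free analogue of `shift_tokens_right`.
def _shift_tokens_right_sketch(labels: list) -> list:
    # Move the last token (the language code) to position 0.
    return labels[-1:] + labels[:-1]
# e.g. _shift_tokens_right_sketch([150, 242, 2, 50003]) == [50003, 150, 242, 2]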
| 152 | 0 |
"""simple docstring"""
def solution(power: int = 1_0_0_0) -> int:
    '''simple docstring'''
    num = 2**power
    list_num = list(str(num))
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("""Enter the power of 2: """).strip())
    print("""2 ^ """, power, """ = """, 2**power)
    result = solution(power)
    print("""Sum of the digits is: """, result)
| 32 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
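# Minimal usage sketch with transformers' HfArgumentParser (the field/flag names
# here are assumptions inferred from the metadata help strings above):
# from transformers import HfArgumentParser
# parser = HfArgumentParser(_A)
# (training_args,) = parser.parse_args_into_dataclasses()
# e.g. `--label_smoothing 0.1 --adafactor` would populate the corresponding fields.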
| 32 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
for attribute in key.split("." ):
UpperCAmelCase_ : List[str] = getattr(__lowerCamelCase, __lowerCamelCase )
if weight_type is not None:
UpperCAmelCase_ : List[str] = getattr(__lowerCamelCase, __lowerCamelCase ).shape
else:
UpperCAmelCase_ : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase_ : List[str] = value
elif weight_type == "weight_g":
UpperCAmelCase_ : List[str] = value
elif weight_type == "weight_v":
UpperCAmelCase_ : int = value
elif weight_type == "bias":
UpperCAmelCase_ : int = value
else:
UpperCAmelCase_ : Optional[int] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : str = []
UpperCAmelCase_ : int = fairseq_model.state_dict()
UpperCAmelCase_ : int = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ : int = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, hf_model.config.feat_extract_norm == "group", )
UpperCAmelCase_ : int = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase_ : List[str] = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
UpperCAmelCase_ : Union[str, Any] = True
if "*" in mapped_key:
UpperCAmelCase_ : Tuple = name.split(__lowerCamelCase )[0].split("." )[-2]
UpperCAmelCase_ : List[Any] = mapped_key.replace("*", __lowerCamelCase )
if "weight_g" in name:
UpperCAmelCase_ : Optional[Any] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ : int = "weight_v"
elif "weight" in name:
UpperCAmelCase_ : List[Any] = "weight"
elif "bias" in name:
UpperCAmelCase_ : Union[str, Any] = "bias"
else:
UpperCAmelCase_ : Optional[int] = None
set_recursively(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = full_name.split("conv_layers." )[-1]
UpperCAmelCase_ : Tuple = name.split("." )
UpperCAmelCase_ : int = int(items[0] )
UpperCAmelCase_ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase_ : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase_ : Optional[Any] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase_ : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase_ : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=None, __lowerCamelCase=True ):
if config_path is not None:
UpperCAmelCase_ : Any = HubertConfig.from_pretrained(__lowerCamelCase )
else:
UpperCAmelCase_ : Dict = HubertConfig()
if is_finetuned:
if dict_path:
UpperCAmelCase_ : str = Dictionary.load(__lowerCamelCase )
            # important: change the bos & pad token ids, since the CTC symbol
            # is <pad> and not <s> as in fairseq
UpperCAmelCase_ : str = target_dict.pad_index
UpperCAmelCase_ : int = target_dict.bos_index
UpperCAmelCase_ : List[str] = target_dict.eos_index
UpperCAmelCase_ : Optional[Any] = len(target_dict.symbols )
UpperCAmelCase_ : int = os.path.join(__lowerCamelCase, "vocab.json" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
with open(__lowerCamelCase, "w", encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices, __lowerCamelCase )
UpperCAmelCase_ : List[Any] = WavaVecaCTCTokenizer(
__lowerCamelCase, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=__lowerCamelCase, )
UpperCAmelCase_ : int = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase_ : Dict = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=__lowerCamelCase, return_attention_mask=__lowerCamelCase, )
UpperCAmelCase_ : Tuple = WavaVecaProcessor(feature_extractor=__lowerCamelCase, tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = HubertForCTC(__lowerCamelCase )
else:
UpperCAmelCase_ : str = HubertModel(__lowerCamelCase )
if is_finetuned:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
UpperCAmelCase_ : int = model[0].eval()
recursively_load_weights(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_a = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
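# The rename loop above resolves dotted attribute paths with repeated getattr;
# a minimal standalone sketch of that pattern (names are illustrative):
# def get_nested_attr(obj, dotted_path):
#     for attribute in dotted_path.split("."):
#         obj = getattr(obj, attribute)
#     return obj
# get_nested_attr(model, "feature_projection.projection.weight")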
| 61 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    '''simple docstring'''

    def test_make_duplicate_clusters(self):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        """simple docstring"""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
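# Background sketch of the MinHash similarity the deduplication above relies on,
# using the third-party `datasketch` package (an assumption for illustration;
# the test itself only exercises minhash_deduplication):
# from datasketch import MinHash
# def minhash_of(text, num_perm=128):
#     m = MinHash(num_perm=num_perm)
#     for token in text.split():
#         m.update(token.encode("utf-8"))
#     return m
# minhash_of("a " * 20).jaccard(minhash_of("a " * 30))  # ~1.0, hence one cluster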
| 61 | 1 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 1_0  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("""n should be an integer greater than 0.""")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                # Evict the least recently used key from the back of the deque
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
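# For comparison, a minimal sketch of the same most-recent-at-front policy built
# on collections.OrderedDict from the standard library:
# from collections import OrderedDict
# class OrderedDictLRU:
#     def __init__(self, capacity):
#         self.capacity = capacity
#         self.store = OrderedDict()
#     def refer(self, key):
#         if key in self.store:
#             self.store.move_to_end(key, last=False)  # most recent at the front
#         else:
#             if len(self.store) == self.capacity:
#                 self.store.popitem(last=True)  # evict least recently used
#             self.store[key] = None
#             self.store.move_to_end(key, last=False)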
| 8 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    '''simple docstring'''
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=""" """)
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=""" """)
            else:
                print(triangle[row_idx][col_idx], end="""""")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError("""The input value of 'num_rows' should be 'int'""")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    '''simple docstring'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    '''simple docstring'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError("""The input value of 'num_rows' should be 'int'""")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = F'''{func.__name__}({value})'''
        timing = timeit(F'''__main__.{call}''', setup="""import __main__""")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'''{call:38} -- {timing:.4f} seconds''')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
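# Cross-check sketch: each row can also be generated directly from binomial
# coefficients with the standard library (Python 3.8+):
# from math import comb
# def pascal_row(row_idx):
#     return [comb(row_idx, k) for k in range(row_idx + 1)]
# pascal_row(4)  # [1, 4, 6, 4, 1] == generate_pascal_triangle(5)[4]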
| 8 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCamelCase ( __lowerCamelCase : int ) ->Optional[Any]:
return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def lowerCamelCase ( ) ->Tuple:
_SCREAMING_SNAKE_CASE = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = parser.add_subparsers(help="""datasets-cli command helpers""" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(__lowerCamelCase )
EnvironmentCommand.register_subcommand(__lowerCamelCase )
TestCommand.register_subcommand(__lowerCamelCase )
RunBeamCommand.register_subcommand(__lowerCamelCase )
DummyDataCommand.register_subcommand(__lowerCamelCase )
# Parse args
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_known_args()
if not hasattr(__lowerCamelCase , """func""" ):
parser.print_help()
exit(1 )
_SCREAMING_SNAKE_CASE = parse_unknown_args(__lowerCamelCase )
# Run
_SCREAMING_SNAKE_CASE = args.func(__lowerCamelCase , **__lowerCamelCase )
service.run()
if __name__ == "__main__":
main()
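# Example invocation sketch (the `datasets` package installs this entry point
# as `datasets-cli`):
#   datasets-cli env
# which dispatches to EnvironmentCommand through the subparser registration above.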
| 58 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
    # We also rename the 'label' column to 'labels', which is the name the
    # transformers models expect for their labels
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
    __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
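# Typical launch sketch (the script filename is a placeholder; the flag names
# match the argparse definitions above):
#   accelerate launch this_script.py --num_epochs 2 --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0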
| 12 | 0 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowercase ( a_ , a_ , a_ ):
"""simple docstring"""
lowercase__ = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple = None , UpperCamelCase__ : Union[str, Any] = 50257 , UpperCamelCase__ : Optional[int] = 1024 , UpperCamelCase__ : Union[str, Any] = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Tuple = 12 , UpperCamelCase__ : Tuple = None , UpperCamelCase__ : Dict = "gelu_new" , UpperCamelCase__ : Any = 0.1 , UpperCamelCase__ : Union[str, Any] = 0.1 , UpperCamelCase__ : Union[str, Any] = 0.1 , UpperCamelCase__ : Tuple = 1E-5 , UpperCamelCase__ : Union[str, Any] = 0.02 , UpperCamelCase__ : Any = True , UpperCamelCase__ : Optional[int] = True , UpperCamelCase__ : str = False , UpperCamelCase__ : Optional[Any] = False , ) -> int:
'''simple docstring'''
super().__init__()
__UpperCamelCase =prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
__UpperCamelCase =prefix_inner_dim
__UpperCamelCase =prefix_hidden_dim
__UpperCamelCase =(
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__UpperCamelCase =(
nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__UpperCamelCase =GPTaConfig(
vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ , activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , )
__UpperCamelCase =GPTaLMHeadModel(UpperCamelCase__ )
def UpperCAmelCase_ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : List[str] = None , ) -> Any:
'''simple docstring'''
__UpperCamelCase =self.transformer.transformer.wte(UpperCamelCase__ )
__UpperCamelCase =self.encode_prefix(UpperCamelCase__ )
__UpperCamelCase =self.decode_prefix(UpperCamelCase__ )
__UpperCamelCase =torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__UpperCamelCase =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__UpperCamelCase =torch.cat((dummy_token, input_ids) , dim=1 )
__UpperCamelCase =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any ) -> torch.Tensor:
'''simple docstring'''
return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encode_prefix(UpperCamelCase__ )
@torch.no_grad()
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase =torch.split(UpperCamelCase__ , 1 , dim=0 )
__UpperCamelCase =[]
__UpperCamelCase =[]
for feature in features:
__UpperCamelCase =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature
# Only support beam search for now
__UpperCamelCase , __UpperCamelCase =self.generate_beam(
input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__UpperCamelCase =torch.stack(UpperCamelCase__ )
__UpperCamelCase =torch.stack(UpperCamelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any] = 5 , UpperCamelCase__ : List[str] = 67 , UpperCamelCase__ : str = 1.0 , UpperCamelCase__ : List[str] = None , ) -> Any:
'''simple docstring'''
__UpperCamelCase =eos_token_id
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int )
__UpperCamelCase =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool )
if input_embeds is not None:
__UpperCamelCase =input_embeds
else:
__UpperCamelCase =self.transformer.transformer.wte(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
__UpperCamelCase =self.transformer(inputs_embeds=UpperCamelCase__ )
__UpperCamelCase =outputs.logits
__UpperCamelCase =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__UpperCamelCase =logits.softmax(-1 ).log()
if scores is None:
__UpperCamelCase , __UpperCamelCase =logits.topk(UpperCamelCase__ , -1 )
__UpperCamelCase =generated.expand(UpperCamelCase__ , *generated.shape[1:] )
__UpperCamelCase , __UpperCamelCase =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__UpperCamelCase =next_tokens
else:
__UpperCamelCase =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] )
__UpperCamelCase =torch.cat((tokens, next_tokens) , dim=1 )
else:
__UpperCamelCase =-float(np.inf )
__UpperCamelCase =0
__UpperCamelCase =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__UpperCamelCase =scores_sum / seq_lengths[:, None]
__UpperCamelCase , __UpperCamelCase =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 )
__UpperCamelCase =next_tokens // scores_sum.shape[1]
__UpperCamelCase =seq_lengths[next_tokens_source]
__UpperCamelCase =next_tokens % scores_sum.shape[1]
__UpperCamelCase =next_tokens.unsqueeze(1 )
__UpperCamelCase =tokens[next_tokens_source]
__UpperCamelCase =torch.cat((tokens, next_tokens) , dim=1 )
__UpperCamelCase =generated[next_tokens_source]
__UpperCamelCase =scores_sum_average * seq_lengths
__UpperCamelCase =is_stopped[next_tokens_source]
__UpperCamelCase =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__UpperCamelCase =torch.cat((generated, next_token_embed) , dim=1 )
__UpperCamelCase =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze()
if is_stopped.all():
break
__UpperCamelCase =scores / seq_lengths
__UpperCamelCase =scores.argsort(descending=UpperCamelCase__ )
# tokens tensors are already padded to max_seq_length
__UpperCamelCase =[tokens[i] for i in order]
__UpperCamelCase =torch.stack(UpperCamelCase__ , dim=0 )
__UpperCamelCase =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
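# Minimal shape sketch of the prefix conditioning done in the forward pass above:
# project CLIP features to prefix embeddings, then prepend them to the token
# embeddings before running GPT-2 (dimensions are illustrative):
# import torch
# prefix_embeds = torch.randn(1, 10, 768)  # (batch, prefix_length, n_embd)
# token_embeds = torch.randn(1, 20, 768)   # (batch, seq_len, n_embd)
# inputs_embeds = torch.cat((prefix_embeds, token_embeds), dim=1)  # (1, 30, 768)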
| 350 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''facebook/bart-large-mnli'''
lowercase__ = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
lowercase__ = '''text_classifier'''
lowercase__ = AutoTokenizer
lowercase__ = AutoModelForSequenceClassification
lowercase__ = ['''text''', ['''text''']]
lowercase__ = ['''text''']
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
super().setup()
__UpperCamelCase =self.model.config
__UpperCamelCase =-1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
__UpperCamelCase =int(UpperCamelCase__ )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any ) -> Any:
'''simple docstring'''
__UpperCamelCase =labels
return self.pre_processor(
[text] * len(UpperCamelCase__ ) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase =outputs.logits
__UpperCamelCase =torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
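# The same NLI-entailment trick is exposed directly by the transformers zero-shot
# pipeline; a minimal usage sketch with the checkpoint referenced above:
# from transformers import pipeline
# classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# classifier("I loved this movie", candidate_labels=["positive", "negative"])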
| 85 | 0 |
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    """simple docstring"""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 93 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowercase : Any = (7_2_0, 1_2_8_0) # Height, Width
_lowercase : List[Any] = (0.4, 0.6) # if height or width lower than this scale, drop it.
_lowercase : str = 1 / 1_0_0
_lowercase : Any = ""
_lowercase : Union[str, Any] = ""
_lowercase : Optional[int] = ""
_lowercase : List[Any] = 2_5_0
def snake_case_ ( ):
"""simple docstring"""
lowercase_ , lowercase_ : Any = get_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for index in range(__SCREAMING_SNAKE_CASE ):
lowercase_ : str = random.sample(range(len(__SCREAMING_SNAKE_CASE ) ) , 4 )
lowercase_ , lowercase_ , lowercase_ : Any = update_image_and_anno(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , filter_scale=__SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowercase_ : int = random_chars(32 )
lowercase_ : str = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
lowercase_ : int = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , __SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowercase_ : List[Any] = []
for anno in new_annos:
lowercase_ : List[Any] = anno[3] - anno[1]
lowercase_ : List[str] = anno[4] - anno[2]
lowercase_ : Dict = anno[1] + width / 2
lowercase_ : Dict = anno[2] + height / 2
lowercase_ : int = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(__SCREAMING_SNAKE_CASE )
with open(F'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
lowercase_ : Optional[Any] = []
for label_file in glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , '''*.txt''' ) ):
lowercase_ : int = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(__SCREAMING_SNAKE_CASE ) as in_file:
lowercase_ : List[str] = in_file.readlines()
lowercase_ : Optional[Any] = os.path.join(__SCREAMING_SNAKE_CASE , F'''{label_name}.jpg''' )
lowercase_ : Optional[int] = []
for obj_list in obj_lists:
lowercase_ : List[str] = obj_list.rstrip('''\n''' ).split(''' ''' )
lowercase_ : Optional[int] = float(obj[1] ) - float(obj[3] ) / 2
lowercase_ : Any = float(obj[2] ) - float(obj[4] ) / 2
lowercase_ : str = float(obj[1] ) + float(obj[3] ) / 2
lowercase_ : List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__SCREAMING_SNAKE_CASE )
labels.append(__SCREAMING_SNAKE_CASE )
return img_paths, labels
def snake_case_ ( __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : tuple[int, int] , __SCREAMING_SNAKE_CASE : tuple[float, float] , __SCREAMING_SNAKE_CASE : float = 0.0 , ):
"""simple docstring"""
lowercase_ : List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowercase_ : Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowercase_ : Optional[int] = int(scale_x * output_size[1] )
lowercase_ : Dict = int(scale_y * output_size[0] )
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = []
for i, index in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase_ : Union[str, Any] = all_img_list[index]
path_list.append(__SCREAMING_SNAKE_CASE )
lowercase_ : int = all_annos[index]
lowercase_ : Dict = cva.imread(__SCREAMING_SNAKE_CASE )
if i == 0: # top-left
lowercase_ : Optional[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
lowercase_ : Tuple = img
for bbox in img_annos:
lowercase_ : Optional[int] = bbox[1] * scale_x
lowercase_ : Optional[Any] = bbox[2] * scale_y
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowercase_ : Dict = cva.resize(__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
lowercase_ : Dict = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Dict = bbox[2] * scale_y
lowercase_ : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowercase_ : List[Any] = cva.resize(__SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : Any = bbox[1] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : str = bbox[3] * scale_x
lowercase_ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowercase_ : int = cva.resize(
__SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowercase_ : List[str] = img
for bbox in img_annos:
lowercase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowercase_ : Any = scale_y + bbox[2] * (1 - scale_y)
lowercase_ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
lowercase_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
lowercase_ : Optional[Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
lowercase_ : Any = ascii_lowercase + digits
return "".join(random.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 93 | 1 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_snake_case = logging.getLogger(__name__)
_snake_case = 'Hello world! cécé herlolip'
_snake_case = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : int = BertAbsConfig(
temp_dir=""".""" , finetune_bert=UpperCamelCase__ , large=UpperCamelCase__ , share_emb=UpperCamelCase__ , use_bert_emb=UpperCamelCase__ , encoder="""bert""" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
_a : List[Any] = torch.load(UpperCamelCase__ , lambda UpperCamelCase__ , UpperCamelCase__ : storage )
_a : str = AbsSummarizer(UpperCamelCase__ , torch.device("""cpu""" ) , UpperCamelCase__ )
original.eval()
_a : Any = BertAbsSummarizer(UpperCamelCase__ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
_a : str = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
_a : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCamelCase__ )) )
_a : str = torch.tensor(UpperCamelCase__ ).unsqueeze(0 )
_a : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCamelCase__ )) )
_a : Optional[Any] = torch.tensor(UpperCamelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
_a : Any = encoder_input_ids
_a : Dict = decoder_input_ids
_a : Dict = None
_a : int = None
_a : List[Any] = None
_a : List[str] = None
_a : Optional[Any] = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
_a : List[str] = original(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )[0]
_a : Optional[int] = original.generator(UpperCamelCase__ )
_a : int = new_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )[0]
_a : List[str] = new_model.generator(UpperCamelCase__ )
_a : Dict = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(UpperCamelCase__ ) )
_a : List[str] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(UpperCamelCase__ ) )
_a : Optional[Any] = torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_snake_case = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
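# Sketch of the save/load convention adopted above: persist only the weights,
# then restore them into a freshly constructed model:
# torch.save(new_model.state_dict(), "pytorch_model.bin")
# model = BertAbsSummarizer(config, torch.device("cpu"))
# model.load_state_dict(torch.load("pytorch_model.bin"))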
| 324 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
_snake_case = HUGGINGFACE_HUB_CACHE
_snake_case = 'config.json'
_snake_case = 'diffusion_pytorch_model.bin'
_snake_case = 'diffusion_flax_model.msgpack'
_snake_case = 'model.onnx'
_snake_case = 'diffusion_pytorch_model.safetensors'
_snake_case = 'weights.pb'
_snake_case = 'https://huggingface.co'
_snake_case = default_cache_path
_snake_case = 'diffusers_modules'
_snake_case = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
_snake_case = ['fp16', 'non-ema']
_snake_case = '.self_attn'
| 324 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : List[str] = logging.getLogger(__name__)
@dataclass(frozen=lowercase__ )
class SCREAMING_SNAKE_CASE__ :
snake_case__ : str
snake_case__ : str
snake_case__ : Optional[str] = None
snake_case__ : Optional[str] = None
snake_case__ : Optional[str] = None
@dataclass(frozen=lowercase__ )
class SCREAMING_SNAKE_CASE__ :
snake_case__ : List[int]
snake_case__ : Optional[List[int]] = None
snake_case__ : Optional[List[int]] = None
snake_case__ : Optional[Union[int, float]] = None
snake_case__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[InputFeatures]
def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[Any]:
a_ : Dict = hans_processors[task]()
a_ : str = os.path.join(
SCREAMING_SNAKE_CASE__ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , ) , )
a_ : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a_ , a_ : Optional[Any] = label_list[2], label_list[1]
a_ : Optional[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a_ : Dict = cached_features_file + '.lock'
with FileLock(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(SCREAMING_SNAKE_CASE__ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
a_ : Optional[int] = torch.load(SCREAMING_SNAKE_CASE__ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
a_ : Optional[Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE__ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE__ )
)
logger.info('Training examples: %s' , len(SCREAMING_SNAKE_CASE__ ) )
a_ : Union[str, Any] = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
logger.info('Saving features into cached file %s' , SCREAMING_SNAKE_CASE__ )
torch.save(self.features , SCREAMING_SNAKE_CASE__ )
def __len__( self : List[Any] ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> InputFeatures:
return self.features[i]
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ :
snake_case__ : List[InputFeatures]
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = 1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> str:
a_ : Any = hans_processors[task]()
a_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a_ , a_ : List[Any] = label_list[2], label_list[1]
a_ : Union[str, Any] = label_list
a_ : Tuple = processor.get_dev_examples(SCREAMING_SNAKE_CASE__ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 1_0_0_0_0 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(SCREAMING_SNAKE_CASE__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
a_ : Tuple = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE__ , (
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) , (
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
return self.dataset
def __len__( self : Dict ) -> Tuple:
return len(self.features )
def __getitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> InputFeatures:
return self.features[i]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
return self.label_list
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE__ , 'heuristics_train_set.txt' ) ) , 'train' )
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE__ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ) -> str:
a_ : List[Any] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE__ ):
if i == 0:
continue
a_ : List[Any] = '%s-%s' % (set_type, line[0])
a_ : Tuple = line[5]
a_ : Optional[int] = line[6]
a_ : List[Any] = line[7][2:] if line[7].startswith('ex' ) else line[7]
a_ : Union[str, Any] = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE__ , text_a=SCREAMING_SNAKE_CASE__ , text_b=SCREAMING_SNAKE_CASE__ , label=SCREAMING_SNAKE_CASE__ , pairID=SCREAMING_SNAKE_CASE__ ) )
return examples
def SCREAMING_SNAKE_CASE_ ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Union[str, Any]:
"""simple docstring"""
a_ : Tuple = {label: i for i, label in enumerate(__A )}
a_ : List[str] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
a_ : Optional[int] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding='max_length' , truncation=__A , return_overflowing_tokens=__A , )
a_ : Union[str, Any] = label_map[example.label] if example.label in label_map else 0
a_ : Union[str, Any] = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
UpperCAmelCase_ : List[str] = {
'hans': 3,
}
UpperCAmelCase_ : Tuple = {
'hans': HansProcessor,
}
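# Usage sketch (hedged; the dataset class above is obfuscated -- upstream it is the
# HANS dataset helper): given a data_dir containing heuristics_train_set.txt /
# heuristics_evaluation_set.txt and a PreTrainedTokenizer, it tokenizes each
# premise/hypothesis pair once, caches the resulting InputFeatures to disk under a
# FileLock, and then serves them through __len__ / __getitem__.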
| 32 |
UpperCAmelCase_ : Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCAmelCase_ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
UpperCAmelCase_ : str = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : int ) -> str:
"""simple docstring"""
assert len(str(__A ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 and 12"
assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
a_ : List[str] = year // 1_00
a_ : Optional[int] = (5 * (century % 4) + 2) % 7
a_ : List[str] = year % 1_00
a_ : str = centurian % 12
a_ : List[str] = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
a_ : Any = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
else DOOMSDAY_LEAP[month - 1]
)
a_ : Any = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
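# Worked example (hedged; the assignment targets above were lost to obfuscation,
# so this traces the intended Doomsday computation): for 2020-10-24, century = 20,
# century anchor = (5 * (20 % 4) + 2) % 7 = 2; centurian = 20 and centurian_m = 8
# give dooms_day = (1 + 8 + 2 + 2) % 7 = 6; October's month anchor is 3, so the
# weekday index is (6 + 24 - 3) % 7 = 6 -> "Saturday", matching the real calendar.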
| 32 | 1 |
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase__ = []
def a__ ( SCREAMING_SNAKE_CASE : list[list[int]] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if board[row][i] == 1:
return False
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(SCREAMING_SNAKE_CASE , -1 , -1 ) , range(SCREAMING_SNAKE_CASE , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(SCREAMING_SNAKE_CASE , -1 , -1 ) , range(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ) ):
if board[i][j] == 1:
return False
return True
def a__ ( SCREAMING_SNAKE_CASE : list[list[int]] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if row >= len(SCREAMING_SNAKE_CASE ):
solution.append(SCREAMING_SNAKE_CASE )
printboard(SCREAMING_SNAKE_CASE )
print()
return True
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if is_safe(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Dict = 1
solve(SCREAMING_SNAKE_CASE , row + 1 )
lowerCAmelCase : List[Any] = 0
return False
def a__ ( SCREAMING_SNAKE_CASE : list[list[int]] ):
'''simple docstring'''
for i in range(len(SCREAMING_SNAKE_CASE ) ):
for j in range(len(SCREAMING_SNAKE_CASE ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
lowerCAmelCase__ = 8
lowerCAmelCase__ = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
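# Sanity check (a known combinatorial result, not recomputed here): for n = 8 the
# intended backtracking search enumerates all 92 distinct queen placements, so the
# final print should report 92 solutions.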
| 133 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def parse_bool( string : str ):
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 133 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('''T''')
class snake_case_ ( Generic[T] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : deque[T] # Cache store of keys
SCREAMING_SNAKE_CASE : set[T] # References of the keys in cache
SCREAMING_SNAKE_CASE : int = 10 # Maximum capacity of cache
def __init__( self : Dict , _UpperCamelCase : int ) ->None:
snake_case_ = deque()
snake_case_ = set()
if not n:
snake_case_ = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
snake_case_ = n
def snake_case__( self : Dict , _UpperCamelCase : T ) ->None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
snake_case_ = self.dq_store.pop()
self.key_reference.remove(_UpperCamelCase )
else:
self.dq_store.remove(_UpperCamelCase )
self.dq_store.appendleft(_UpperCamelCase )
self.key_reference.add(_UpperCamelCase )
def snake_case__( self : List[Any] ) ->None:
for k in self.dq_store:
print(_UpperCamelCase )
def __repr__( self : Optional[Any] ) ->str:
return f'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
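# Eviction discipline (hedged; several names above were mangled, so this reads the
# intended LRU logic): refer() appendleft()s every key, so the most recently used
# key sits at the left end of the deque and the least recently used drifts right --
# that rightmost key is the one pop()ped at capacity, hence [5, 4, 'A', 3] above.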
| 8 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
# We need to create a solution object to save the path.
snake_case_ = [[0 for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )]
snake_case_ = run_maze(SCREAMING_SNAKE_CASE__ , 0 , 0 , SCREAMING_SNAKE_CASE__ )
if solved:
print('''\n'''.join(str(SCREAMING_SNAKE_CASE__ ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = len(SCREAMING_SNAKE_CASE__ )
# Final checkpoint: stop when the bottom-right cell is reached.
if i == j == (size - 1):
snake_case_ = 1
return True
snake_case_ = (not i < 0) and (not j < 0) # Check lower bounds
snake_case_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already-visited and blocked points.
snake_case_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# mark the cell as visited
snake_case_ = 1
# explore all four directions
if (
run_maze(SCREAMING_SNAKE_CASE__ , i + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j + 1 , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
or run_maze(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - 1 , SCREAMING_SNAKE_CASE__ )
):
return True
snake_case_ = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
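# Semantics sketch (hedged; the assignment targets above were lost to obfuscation,
# so this describes the intended rat-in-a-maze solver): maze[i][j] == 0 is an open
# cell and 1 is a wall; solutions marks the path with 1s, e.g. for [[0, 1], [0, 0]]
# the printed path matrix is [[1, 0], [1, 1]] (move down, then right).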
| 8 | 1 |
'''simple docstring'''
import argparse
import struct
import unittest
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = data
# Initialize hash values
UpperCAmelCase_ : List[str] = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
# Initialize round constants
UpperCAmelCase_ : Tuple = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
UpperCAmelCase_ : Dict = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCamelCase ( snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = b'\x80' + (b'\x00' * (6_3 - (len(snake_case_ ) + 8) % 6_4))
UpperCAmelCase_ : Any = struct.pack('>Q' , (len(snake_case_ ) * 8) )
return data + padding + big_endian_integer
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Dict = [
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data ) , 6_4 )
]
for block in self.blocks:
# Convert the given block into a list of sixteen 4-byte big-endian integers
UpperCAmelCase_ : Union[str, Any] = list(struct.unpack('>16L' , snake_case_ ) )
# extend with 48 zeroed words to hold the message schedule
words += [0] * 4_8
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = self.hashes
for index in range(0 , 6_4 ):
if index > 1_5:
# fill in the zeroed message-schedule words at the end of the array
UpperCAmelCase_ : str = (
self.ror(words[index - 1_5] , 7 )
^ self.ror(words[index - 1_5] , 1_8 )
^ (words[index - 1_5] >> 3)
)
UpperCAmelCase_ : List[str] = (
self.ror(words[index - 2] , 1_7 )
^ self.ror(words[index - 2] , 1_9 )
^ (words[index - 2] >> 1_0)
)
UpperCAmelCase_ : Tuple = (
words[index - 1_6] + sa + words[index - 7] + sa
) % 0x1_0000_0000
# Compression
UpperCAmelCase_ : List[str] = self.ror(snake_case_ , 6 ) ^ self.ror(snake_case_ , 1_1 ) ^ self.ror(snake_case_ , 2_5 )
UpperCAmelCase_ : int = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
UpperCAmelCase_ : Dict = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_0000_0000
UpperCAmelCase_ : List[Any] = self.ror(snake_case_ , 2 ) ^ self.ror(snake_case_ , 1_3 ) ^ self.ror(snake_case_ , 2_2 )
UpperCAmelCase_ : Union[str, Any] = (a & b) ^ (a & c) ^ (b & c)
UpperCAmelCase_ : Tuple = (sa + maj) % 0x1_0000_0000
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = (
g,
f,
e,
((d + tempa) % 0x1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0x1_0000_0000),
)
UpperCAmelCase_ : Any = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCAmelCase_ : List[str] = [
((element + mutated_hash_values[index]) % 0x1_0000_0000)
for index, element in enumerate(self.hashes )
]
UpperCAmelCase_ : Union[str, Any] = ''.join([hex(snake_case_ )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
return 0xFFFF_FFFF & (value << (3_2 - rotations)) | (value >> rotations)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
import hashlib
UpperCAmelCase_ : int = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(snake_case_ ).hash , hashlib.shaaaa(snake_case_ ).hexdigest() )
def _lowerCamelCase ( ):
"""simple docstring"""
import doctest
doctest.testmod()
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
UpperCAmelCase_ : List[Any] = parser.parse_args()
UpperCAmelCase_ : List[Any] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
UpperCAmelCase_ : Optional[Any] = f.read()
else:
UpperCAmelCase_ : Optional[Any] = bytes(lowerCamelCase_ , 'utf-8' )
print(SHAaaa(lowerCamelCase_ ).hash )
if __name__ == "__main__":
main()
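# Quick check of the 32-bit right-rotate used by the compression loop above
# (a property of the intended helper, independent of the obfuscated method names):
# rotating 0x0000_0001 right by 1 yields 0x8000_0000, and rotating any 32-bit word
# right by 32 - k equals rotating it left by k.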
| 274 |
'''simple docstring'''
def _lowerCamelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str = " " ):
"""simple docstring"""
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[Any] = 0
for index, char in enumerate(lowerCamelCase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase_ : Optional[Any] = index + 1
elif index + 1 == len(lowerCamelCase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
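# Worked trace (hedged; the duplicated parameter names above are an obfuscation
# artifact, so this describes the intended splitter): splitting "Hello world" on
# " " yields ["Hello", "world"]; unlike str.split, a trailing separator adds no
# empty final chunk, because the last piece is only appended when the final
# character is not the separator itself.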
| 274 | 1 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: List[str] ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[Any] = nn.Linear(3, 4 )
_snake_case : List[Any] = nn.BatchNormad(4 )
_snake_case : Any = nn.Linear(4, 5 )
def UpperCamelCase_ ( self: str, a_: Optional[Any] ):
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(a_ ) ) )
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Union[str, Any] = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a_, model.state_dict() )
_snake_case : Dict = os.path.join(a_, """index.json""" )
self.assertTrue(os.path.isfile(a_ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
_snake_case : Tuple = os.path.join(a_, f"{key}.dat" )
self.assertTrue(os.path.isfile(a_ ) )
# TODO: add tests checking that the weights are properly loaded
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : List[Any] = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
_snake_case : List[Any] = torch.randn(2, 3, dtype=a_ )
with TemporaryDirectory() as tmp_dir:
_snake_case : Optional[int] = offload_weight(a_, """weight""", a_, {} )
_snake_case : Optional[int] = os.path.join(a_, """weight.dat""" )
self.assertTrue(os.path.isfile(a_ ) )
self.assertDictEqual(a_, {"""weight""": {"""shape""": [2, 3], """dtype""": str(a_ ).split(""".""" )[1]}} )
_snake_case : int = load_offloaded_weight(a_, index["""weight"""] )
self.assertTrue(torch.equal(a_, a_ ) )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : int = ModelForTest()
_snake_case : int = model.state_dict()
_snake_case : Any = {k: v for k, v in state_dict.items() if """linear2""" not in k}
_snake_case : Optional[int] = {k: v for k, v in state_dict.items() if """linear2""" in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a_, a_ )
_snake_case : Optional[int] = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ )
# Every key is there with the right value
self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a_, weight_map[key] ) )
_snake_case : Tuple = {k: v for k, v in state_dict.items() if """weight""" in k}
_snake_case : List[str] = {k: v for k, v in state_dict.items() if """weight""" not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a_, a_ )
_snake_case : Dict = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ )
# Every key is there with the right value
self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a_, weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(a_, a_ )
# Duplicates are removed
_snake_case : int = OffloadedWeightsLoader(state_dict=a_, save_folder=a_ )
# Every key is there with the right value
self.assertEqual(sorted(a_ ), sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(a_, weight_map[key] ) )
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : int = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
_snake_case : Tuple = extract_submodules_state_dict(a_, ["""a.1""", """a.2"""] )
self.assertDictEqual(a_, {"""a.1""": 0, """a.2""": 2} )
_snake_case : Optional[int] = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
_snake_case : Tuple = extract_submodules_state_dict(a_, ["""a.1""", """a.2"""] )
self.assertDictEqual(a_, {"""a.1.a""": 0, """a.2.a""": 2} )
| 64 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase_( snake_case : Optional[int] ):
'''simple docstring'''
return EnvironmentCommand()
class _snake_case ( lowercase_ ):
@staticmethod
def lowerCAmelCase__ ( a__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ = parser.add_parser("env" )
download_parser.set_defaults(func=a__ )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = huggingface_hub.__version__
snake_case_ = "not installed"
snake_case_ = "NA"
if is_torch_available():
import torch
snake_case_ = torch.__version__
snake_case_ = torch.cuda.is_available()
snake_case_ = "not installed"
if is_transformers_available():
import transformers
snake_case_ = transformers.__version__
snake_case_ = "not installed"
if is_accelerate_available():
import accelerate
snake_case_ = accelerate.__version__
snake_case_ = "not installed"
if is_xformers_available():
import xformers
snake_case_ = xformers.__version__
snake_case_ = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a__ ) )
return info
@staticmethod
def lowerCAmelCase__ ( a__ ) -> str:
'''simple docstring'''
return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 85 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = MgpstrTokenizer
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[str] = {}
__SCREAMING_SNAKE_CASE : List[Any] = False
def a ( self ):
super().setUp()
# fmt: off
snake_case_ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
snake_case_ = dict(zip(snake_case , range(len(snake_case ) ) ) )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case ) + '\n' )
def a ( self , **snake_case ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def a ( self , snake_case ):
snake_case_ = 'tester'
snake_case_ = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def a ( self ):
pass
def a ( self ):
snake_case_ = self.get_tokenizers(do_lower_case=snake_case )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
snake_case_ = tokenizer.encode([special_token] , add_special_tokens=snake_case )
self.assertEqual(len(snake_case ) , 1 )
snake_case_ = tokenizer.decode(snake_case , skip_special_tokens=snake_case )
self.assertTrue(special_token not in decoded )
def a ( self ):
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ , snake_case_ = self.get_input_output_texts(snake_case )
snake_case_ = tokenizer.tokenize(snake_case )
snake_case_ = tokenizer.convert_tokens_to_ids(snake_case )
snake_case_ = tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
snake_case_ = tokenizer.convert_ids_to_tokens(snake_case )
self.assertNotEqual(len(snake_case ) , 0 )
snake_case_ = tokenizer.decode(snake_case )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual(text_a.replace(' ' , '' ) , snake_case )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def a ( self ):
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def a ( self ):
pass
| 200 |
import os
import sys
import unittest
_UpperCAmelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_UpperCAmelCase : List[Any] = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
_UpperCAmelCase : Dict = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class lowercase ( unittest.TestCase ):
def a ( self ):
snake_case_ = get_test_to_tester_mapping(snake_case )
snake_case_ = get_test_to_tester_mapping(snake_case )
snake_case_ = {'BertModelTest': 'BertModelTester'}
snake_case_ = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
def a ( self ):
snake_case_ = get_model_to_test_mapping(snake_case )
snake_case_ = get_model_to_test_mapping(snake_case )
snake_case_ = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
snake_case_ = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
def a ( self ):
snake_case_ = get_model_to_tester_mapping(snake_case )
snake_case_ = get_model_to_tester_mapping(snake_case )
snake_case_ = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
snake_case_ = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
self.assertEqual(get_test_info.to_json(snake_case ) , snake_case )
| 200 | 1 |
'''simple docstring'''
import operator as op
def a__ ( lowercase : List[str] ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = lambda lowercase, lowercase : int(x / y ) # noqa: E731 integer division operation
_UpperCamelCase = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ), '''Action'''.center(12 ), '''Stack''', sep=''' | ''' )
print('''-''' * (30 + len(lowercase )) )
for x in post_fix:
if x.isdigit(): # if x is a digit
stack.append(lowercase ) # append x to stack
# output in tabular format
print(x.rjust(8 ), ('''push(''' + x + ''')''').ljust(12 ), ''','''.join(lowercase ), sep=''' | ''' )
else:
_UpperCamelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ), ('''pop(''' + b + ''')''').ljust(12 ), ''','''.join(lowercase ), sep=''' | ''' )
_UpperCamelCase = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ), ('''pop(''' + a + ''')''').ljust(12 ), ''','''.join(lowercase ), sep=''' | ''' )
stack.append(
str(opr[x](int(lowercase ), int(lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ), ('''push(''' + a + x + b + ''')''').ljust(12 ), ''','''.join(lowercase ), sep=''' | ''', )
return int(stack[0] )
if __name__ == "__main__":
lowercase__ : Dict = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
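# Worked evaluation (hedged; some stack references above were mangled, so this
# traces the intended evaluator): for the input "5 6 2 + * 12 4 / -" the stack
# evolves 5 | 5 6 | 5 6 2 | 5 8 | 40 | 40 12 | 40 12 4 | 40 3 | 37, and the
# returned result is 37.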
| 324 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def a__ ( lowercase : Iterable[str], lowercase : int ) -> Generator[tuple[str, ...], None, None]:
"""simple docstring"""
_UpperCamelCase = iter(lowercase )
while True:
_UpperCamelCase = tuple(itertools.islice(lowercase, lowercase ) )
if not chunk:
return
yield chunk
def a__ ( lowercase : str ) -> str:
"""simple docstring"""
_UpperCamelCase = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
_UpperCamelCase = ''''''
if len(lowercase ) < 2:
return dirty
for i in range(len(lowercase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(lowercase ) & 1:
clean += "X"
return clean
def a__ ( lowercase : str ) -> list[str]:
"""simple docstring"""
_UpperCamelCase = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_UpperCamelCase = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(lowercase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(lowercase )
return table
def a__ ( lowercase : str, lowercase : str ) -> str:
"""simple docstring"""
_UpperCamelCase = generate_table(lowercase )
_UpperCamelCase = prepare_input(lowercase )
_UpperCamelCase = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowercase, 2 ):
_UpperCamelCase , _UpperCamelCase = divmod(table.index(lowercase ), 5 )
_UpperCamelCase , _UpperCamelCase = divmod(table.index(lowercase ), 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def a__ ( lowercase : str, lowercase : str ) -> str:
"""simple docstring"""
_UpperCamelCase = generate_table(lowercase )
_UpperCamelCase = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowercase, 2 ):
_UpperCamelCase , _UpperCamelCase = divmod(table.index(lowercase ), 5 )
_UpperCamelCase , _UpperCamelCase = divmod(table.index(lowercase ), 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
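# Round-trip note (hedged; every function in this sample was renamed to a__, so
# upstream names encode/decode are assumed): the 5x5 table folds J into I, doubled
# letters are split with X, and odd-length input gets a trailing X, so decoding
# the encoding of "Hide the gold" returns "HIDETHEGOLDX", not the raw plaintext.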
| 324 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Dict , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : Optional[int] , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCamelCase = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase = do_convert_rgb
def A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
"""simple docstring"""
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
UpperCamelCase = (size['height'], size['width'])
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ):
"""simple docstring"""
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCamelCase = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase = [convert_to_rgb(UpperCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
UpperCamelCase = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCamelCase__ )
return encoded_outputs
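# Usage sketch (hedged; the class and method names above are obfuscated, and
# pil_image is a hypothetical PIL.Image -- defaults are a 384x384 resize plus
# CLIP mean/std normalization):
#   processor = <ImageProcessor class above>()
#   batch = processor(images=[pil_image], return_tensors='pt')
#   # batch['pixel_values'] has shape (1, 3, 384, 384)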
| 364 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MODEL_FOR_CAUSAL_LM_MAPPING
_SCREAMING_SNAKE_CASE = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
UpperCamelCase = text_generator('This is a test' , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
UpperCamelCase = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
UpperCamelCase__ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
UpperCamelCase = text_generator('This is a test' , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{'generated_token_ids': ANY(UpperCamelCase__ )},
{'generated_token_ids': ANY(UpperCamelCase__ )},
] , )
UpperCamelCase = text_generator.model.config.eos_token_id
UpperCamelCase = '<pad>'
UpperCamelCase = text_generator(
['This is a test', 'This is a second test'] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , )
self.assertEqual(
UpperCamelCase__ , [
[
{'generated_token_ids': ANY(UpperCamelCase__ )},
{'generated_token_ids': ANY(UpperCamelCase__ )},
],
[
{'generated_token_ids': ANY(UpperCamelCase__ )},
{'generated_token_ids': ANY(UpperCamelCase__ )},
],
] , )
@require_tf
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
UpperCamelCase = text_generator('This is a test' , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
UpperCamelCase = text_generator(['This is a test', 'This is a second test'] , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def A ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
UpperCamelCase = TextGenerationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
return text_generator, ["This is a test", "Another test"]
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = 'Hello I believe in'
UpperCamelCase = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
UpperCamelCase = text_generator(UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
UpperCamelCase = text_generator(UpperCamelCase__ , stop_sequence=' fe' )
self.assertEqual(UpperCamelCase__ , [{'generated_text': 'Hello I believe in fe'}] )
def A ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = text_generator.model
UpperCamelCase = text_generator.tokenizer
UpperCamelCase = text_generator('This is a test' )
self.assertEqual(UpperCamelCase__ , [{'generated_text': ANY(UpperCamelCase__ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
UpperCamelCase = text_generator('This is a test' , return_full_text=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{'generated_text': ANY(UpperCamelCase__ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
UpperCamelCase = pipeline(task='text-generation' , model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , return_full_text=UpperCamelCase__ )
UpperCamelCase = text_generator('This is a test' )
self.assertEqual(UpperCamelCase__ , [{'generated_text': ANY(UpperCamelCase__ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
UpperCamelCase = text_generator('This is a test' , return_full_text=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{'generated_text': ANY(UpperCamelCase__ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
UpperCamelCase = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCamelCase = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
[{'generated_text': ANY(UpperCamelCase__ )}, {'generated_text': ANY(UpperCamelCase__ )}],
] , )
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = text_generator('test' , return_full_text=UpperCamelCase__ , return_text=UpperCamelCase__ )
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = text_generator('test' , return_full_text=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
with self.assertRaises(UpperCamelCase__ ):
UpperCamelCase = text_generator('test' , return_text=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCamelCase = text_generator('' )
self.assertEqual(UpperCamelCase__ , [{'generated_text': ANY(UpperCamelCase__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCamelCase = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and controlling long
# generation with only max_length would require fancy calculation,
# so we dismiss these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCamelCase = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 5_0_0 , max_new_tokens=2_0 )
UpperCamelCase = text_generator('This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(UpperCamelCase__ ):
text_generator(
'This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 1_0 , )
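# Note on handle_long_generation='hole' (hedged, per the pipeline docs): it drops
# tokens from the left of an over-long prompt so that prompt + max_new_tokens fits
# within tokenizer.model_max_length, which is why the oversized prompt succeeds
# with max_new_tokens=20 but must fail once max_new_tokens exceeds the window.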
@require_torch
@require_accelerate
@require_torch_gpu
def A ( self : int ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
UpperCamelCase = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCamelCase = pipe('This is a test' )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else).
UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCamelCase = pipe('This is a test' )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
UpperCamelCase = pipe('This is a test' )
self.assertEqual(
UpperCamelCase__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def A ( self : int ):
"""simple docstring"""
import torch
UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def A ( self : Any ):
"""simple docstring"""
import torch
UpperCamelCase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=UpperCamelCase__ , top_p=0.5 )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = 'Hello world'
UpperCamelCase = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
UpperCamelCase = logging.get_logger('transformers.generation.tf_utils' )
else:
UpperCamelCase = logging.get_logger('transformers.generation.utils' )
UpperCamelCase = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(UpperCamelCase__ ) as cl:
UpperCamelCase = text_generator(UpperCamelCase__ , max_length=1_0 , max_new_tokens=1 )
self.assertIn(UpperCamelCase__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(UpperCamelCase__ ) as cl:
UpperCamelCase = text_generator(UpperCamelCase__ , max_new_tokens=1 )
self.assertNotIn(UpperCamelCase__ , cl.out )
with CaptureLogger(UpperCamelCase__ ) as cl:
UpperCamelCase = text_generator(UpperCamelCase__ , max_length=1_0 )
self.assertNotIn(UpperCamelCase__ , cl.out )
| 249 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase_ : Any = 'src/diffusers'
lowercase_ : Dict = '.'
# This is to make sure the diffusers module imported is the one in the repo.
lowercase_ : int = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase_ : Dict = spec.loader.load_module()
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
return line.startswith(snake_case_ ) or len(snake_case_ ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , snake_case_ ) is not None
def find_code_in_diffusers( object_name ):
    '''simple docstring'''
    parts = object_name.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(r'<FILL\s+[^>]*>')
def get_indent( code ):
    '''simple docstring'''
    lines = code.split("\n" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"^(\s*)\S" , lines[idx] ).groups()[0]
    return ""
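# black can only format top-level code, so indented snippets are wrapped in a dummy class before formatting
# and unwrapped again afterwards.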
def blackify( code ):
    '''simple docstring'''
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent( filename , overwrite=False ):
    '''simple docstring'''
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(snake_case_ ):
_UpperCAmelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = search.groups()
_UpperCAmelCase = find_code_in_diffusers(snake_case_ )
_UpperCAmelCase = get_indent(snake_case_ )
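        # The observed copy starts one line below the comment, or two lines below when the comment's
        # indentation differs from the original code's indentation.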
_UpperCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_UpperCAmelCase = theoretical_indent
_UpperCAmelCase = start_index
        # Loop to check the observed code; stop when the indentation diminishes or when we see an `# End copy` comment.
_UpperCAmelCase = True
while line_index < len(snake_case_ ) and should_continue:
line_index += 1
if line_index >= len(snake_case_ ):
break
_UpperCAmelCase = lines[line_index]
_UpperCAmelCase = _should_continue(snake_case_ , snake_case_ ) and re.search(f"""^{indent}# End copy""" , snake_case_ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_UpperCAmelCase = lines[start_index:line_index]
_UpperCAmelCase = "".join(snake_case_ )
# Remove any nested `Copied from` comments to avoid circular copies
_UpperCAmelCase = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(snake_case_ ) is None]
_UpperCAmelCase = "\n".join(snake_case_ )
# Before comparing, use the `replace_pattern` on the original code.
if len(snake_case_ ) > 0:
_UpperCAmelCase = replace_pattern.replace("with" , "" ).split("," )
_UpperCAmelCase = [_re_replace_pattern.search(snake_case_ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = pattern.groups()
_UpperCAmelCase = re.sub(snake_case_ , snake_case_ , snake_case_ )
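                # the `all-casing` option also applies the replacement to the all-lowercase and all-uppercase variants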
if option.strip() == "all-casing":
_UpperCAmelCase = re.sub(obja.lower() , obja.lower() , snake_case_ )
_UpperCAmelCase = re.sub(obja.upper() , obja.upper() , snake_case_ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_UpperCAmelCase = blackify(lines[start_index - 1] + theoretical_code )
_UpperCAmelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_UpperCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_UpperCAmelCase = start_index + 1
if overwrite and len(snake_case_ ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(snake_case_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(snake_case_ )
return diffs
def check_copies( overwrite = False ):
    '''simple docstring'''
    all_files = glob.glob(os.path.join(REPO_PATH , "**/*.py" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = "\n".join(diffs )
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
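    # Typical invocations, assuming the script lives at utils/check_copies.py as noted above:
    #   python utils/check_copies.py                      # only report inconsistencies
    #   python utils/check_copies.py --fix_and_overwrite  # rewrite out-of-date copies in place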
| 133 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
snake_case_ : Tuple = StableDiffusionLDMaDPipeline
snake_case_ : Optional[int] = TEXT_TO_IMAGE_PARAMS
snake_case_ : str = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case_ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
_UpperCAmelCase = CLIPTextModel(snake_case__ )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(snake_case__ )
else:
_UpperCAmelCase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case__ )
_UpperCAmelCase = ldmad_pipe.to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case__ )
_UpperCAmelCase = ldmad_pipe(**snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
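        # the LDM3D pipeline jointly generates an RGB image and a matching depth map in a single diffusion pass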
_UpperCAmelCase = rgb[0, -3:, -3:, -1]
_UpperCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCAmelCase = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
_UpperCAmelCase = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case__ )
_UpperCAmelCase = ldmad_pipe.to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case__ )
_UpperCAmelCase = 3 * [inputs["prompt"]]
# forward
_UpperCAmelCase = ldmad_pipe(**snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
_UpperCAmelCase = depth_slice_a[0, -3:, -1]
_UpperCAmelCase = self.get_dummy_inputs(snake_case__ )
_UpperCAmelCase = 3 * [inputs.pop("prompt" )]
_UpperCAmelCase = ldmad_pipe.tokenizer(
snake_case__ , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="pt" , )
_UpperCAmelCase = text_inputs["input_ids"].to(snake_case__ )
_UpperCAmelCase = ldmad_pipe.text_encoder(snake_case__ )[0]
_UpperCAmelCase = prompt_embeds
# forward
_UpperCAmelCase = ldmad_pipe(**snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
_UpperCAmelCase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=snake_case__ )
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case__ )
_UpperCAmelCase = ldmad_pipe.to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
_UpperCAmelCase = self.get_dummy_inputs(snake_case__ )
_UpperCAmelCase = "french fries"
_UpperCAmelCase = ldmad_pipe(**snake_case__ , negative_prompt=snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb[0, -3:, -3:, -1]
_UpperCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCAmelCase = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
_UpperCAmelCase = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : str , snake_case__ : Optional[int] , snake_case__ : Tuple="cpu" , snake_case__ : Any=torch.floataa , snake_case__ : Dict=0 ):
"""simple docstring"""
_UpperCAmelCase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_UpperCAmelCase = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 64, 64) )
_UpperCAmelCase = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
_UpperCAmelCase = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCamelCase ( self : Any ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
_UpperCAmelCase = ldmad_pipe.to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
_UpperCAmelCase = self.get_inputs(snake_case__ )
_UpperCAmelCase = ldmad_pipe(**snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb[0, -3:, -3:, -1].flatten()
        _UpperCAmelCase = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
_UpperCAmelCase = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
_UpperCAmelCase = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Any , snake_case__ : Optional[Any] , snake_case__ : int="cpu" , snake_case__ : Optional[Any]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
_UpperCAmelCase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_UpperCAmelCase = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 64, 64) )
_UpperCAmelCase = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
_UpperCAmelCase = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
_UpperCAmelCase = self.get_inputs(snake_case__ )
_UpperCAmelCase = ldmad_pipe(**snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = 0.495_586
_UpperCAmelCase = 0.33_795_515
_UpperCAmelCase = 112.48_518
_UpperCAmelCase = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
_UpperCAmelCase = self.get_inputs(snake_case__ )
_UpperCAmelCase = ldmad_pipe(**snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = 0.4_194_127
_UpperCAmelCase = 0.35_375_586
_UpperCAmelCase = 0.5_638_502
_UpperCAmelCase = 0.34_686_103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 133 | 1 |
import os
from collections.abc import Iterator
def good_file_paths( top_dir = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip("./" )
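# Depth 0 renders as a new markdown section heading; deeper entries render as indented list bullets.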
def md_prefix( i ) -> str:
    return F'''{i * "  "}*''' if i else "\n##"
def print_path( old_path , new_path ) -> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(F'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def print_directory_md( top_dir = "." ) -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = F'''{filepath}/{filename}'''.replace(" " , "%20" )
        filename = os.path.splitext(filename.replace("_" , " " ).title() )[0]
        print(F'''{md_prefix(indent )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('.')
| 367 |
import math
def res( x , y ) -> float:
    if 0 not in (x, y):
        # We compare powers through their base-10 logarithms: log10(x^y) = y * log10(x).
        return y * math.log10(x )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
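# Example: res(2, 10) = 10 * log10(2) ≈ 3.01 while res(10, 2) = 2, so 2**10 (1024) beats 10**2 (100).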
if __name__ == "__main__": # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    xa , ya = map(int, input(prompt).split(','))
    xb , yb = map(int, input(prompt).split(','))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('Largest number is', xa, '^', ya)
    elif resb > resa:
        print('Largest number is', xb, '^', yb)
else:
print('Both are equal')
| 328 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : Dict = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class A (PretrainedConfig ):
'''simple docstring'''
    model_type = '''cvt'''
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 1_92, 3_84] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.0_2 , layer_norm_eps=1e-12 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
A__ = num_channels
A__ = patch_sizes
A__ = patch_stride
A__ = patch_padding
A__ = embed_dim
A__ = num_heads
A__ = depth
A__ = mlp_ratio
A__ = attention_drop_rate
A__ = drop_rate
A__ = drop_path_rate
A__ = qkv_bias
A__ = cls_token
A__ = qkv_projection_method
A__ = kernel_qkv
A__ = padding_kv
A__ = stride_kv
A__ = padding_q
A__ = stride_q
A__ = initializer_range
A__ = layer_norm_eps
| 274 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
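# Build a bias-free linear LM head whose weights are copied from the embedding matrix (weight tying).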
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk( checkpoint_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    args = Namespace(**checkpoint["""cfg"""]["""model"""] )
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    state_dict = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 274 | 1 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
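# search() implements A*: each open cell is ranked by f = g + heuristic, and the cheapest cell is expanded first.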
def search(grid: list[list[int]] , init: list[int] , goal: list[int] , cost: int , heuristic: list[list[int]] , ):
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid of visited cells
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated total cost: cost so far plus heuristic to the goal
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("Algorithm is unable to find solution" )
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 171 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger()
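# Tracker records, via forward hooks, every leaf module executed in a forward pass;
# its `parametrized` property keeps only the traced modules that hold weights.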
@dataclass
class Tracker :
    module : nn.Module
    traced : List[nn.Module] = field(default_factory=list )
    handles : list = field(default_factory=list )
    def _forward_hook( self , m , inputs: Tensor , outputs: Tensor ):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x: Tensor ):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized( self ):
        '''simple docstring'''
        return list(filter(lambda x: len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
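# ModuleTransfer copies weights from `src` to `dest` by tracing both on the same input
# and zipping their parametrized operations together in execution order.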
@dataclass
class ModuleTransfer :
    src : nn.Module
    dest : nn.Module
    verbose : int = 0
    src_skip : List = field(default_factory=list )
    dest_skip : List = field(default_factory=list )
    def __call__( self , x: Tensor ):
        '''simple docstring'''
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m: type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m: type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced )} operations while'''
                f''' destination module has {len(dest_traced )}.''' )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''' )
def convert_weight_and_push(name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
print(F'''Converting {name}...''' )
with torch.no_grad():
__UpperCamelCase : Optional[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ).eval()
__UpperCamelCase : Union[str, Any] = ResNetForImageClassification(_lowerCAmelCase ).eval()
__UpperCamelCase : Any = ModuleTransfer(src=_lowerCAmelCase , dest=_lowerCAmelCase )
__UpperCamelCase : Optional[int] = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(_lowerCAmelCase )
assert torch.allclose(from_model(_lowerCAmelCase ) , our_model(_lowerCAmelCase ).logits ), "The model logits don't match the original one."
__UpperCamelCase : Tuple = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(_lowerCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=_lowerCAmelCase , )
        # ResNet shares its preprocessing with ConvNeXt, so we can reuse the ConvNeXt image processor
__UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=_lowerCAmelCase , )
print(F'''Pushed {checkpoint_name}''' )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
__UpperCamelCase : str = "imagenet-1k-id2label.json"
__UpperCamelCase : Dict = 10_00
__UpperCamelCase : Any = (1, num_labels)
__UpperCamelCase : Union[str, Any] = "huggingface/label-files"
__UpperCamelCase : List[Any] = num_labels
__UpperCamelCase : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
__UpperCamelCase : Optional[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
__UpperCamelCase : List[str] = idalabel
__UpperCamelCase : str = {v: k for k, v in idalabel.items()}
__UpperCamelCase : Dict = partial(_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase )
__UpperCamelCase : List[str] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(_lowerCAmelCase , names_to_config[model_name] , _lowerCAmelCase , _lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 171 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowercase__ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : int = AutoencoderKL
A_ : Optional[Any] = """sample"""
A_ : Dict = 1E-2
@property
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = 4
_SCREAMING_SNAKE_CASE : Any = 3
_SCREAMING_SNAKE_CASE : List[str] = (32, 32)
_SCREAMING_SNAKE_CASE : Any = floats_tensor((batch_size, num_channels) + sizes ).to(__snake_case )
return {"sample": image}
@property
def UpperCAmelCase_ ( self ):
return (3, 32, 32)
@property
def UpperCAmelCase_ ( self ):
return (3, 32, 32)
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
_SCREAMING_SNAKE_CASE : str = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def UpperCAmelCase_ ( self ):
# enable deterministic behavior for gradient checkpointing
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.prepare_init_args_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : str = self.model_class(**__snake_case )
model.to(__snake_case )
assert not model.is_gradient_checkpointing and model.training
_SCREAMING_SNAKE_CASE : Tuple = model(**__snake_case ).sample
        # run the backwards pass on the model. For simplicity, we skip computing a real loss
        # and instead backprop on the mean of (out - labels).
model.zero_grad()
_SCREAMING_SNAKE_CASE : List[str] = torch.randn_like(__snake_case )
_SCREAMING_SNAKE_CASE : str = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_class(**__snake_case )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__snake_case )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_SCREAMING_SNAKE_CASE : List[Any] = model_a(**__snake_case ).sample
        # run the backwards pass on the model. For simplicity, we skip computing a real loss
        # and instead backprop on the mean of (out - labels).
model_a.zero_grad()
_SCREAMING_SNAKE_CASE : Any = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
_SCREAMING_SNAKE_CASE : List[str] = dict(model.named_parameters() )
_SCREAMING_SNAKE_CASE : List[Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__snake_case )
_SCREAMING_SNAKE_CASE : str = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[str] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
_SCREAMING_SNAKE_CASE : List[str] = model.to(__snake_case )
model.eval()
if torch_device == "mps":
_SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 )
else:
_SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=__snake_case ).manual_seed(0 )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_SCREAMING_SNAKE_CASE : int = image.to(__snake_case )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : str = model(__snake_case , sample_posterior=__snake_case , generator=__snake_case ).sample
_SCREAMING_SNAKE_CASE : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_SCREAMING_SNAKE_CASE : Any = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
_SCREAMING_SNAKE_CASE : str = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_SCREAMING_SNAKE_CASE : Any = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(__snake_case , __snake_case , rtol=1e-2 ) )
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__snake_case ) for s in shape] )}.npy"""
def UpperCAmelCase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self , __snake_case=0 , __snake_case=(4, 3, 512, 512) , __snake_case=False ):
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.floataa if fpaa else torch.floataa
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(load_hf_numpy(self.get_file_format(__snake_case , __snake_case ) ) ).to(__snake_case ).to(__snake_case )
return image
def UpperCAmelCase_ ( self , __snake_case="CompVis/stable-diffusion-v1-4" , __snake_case=False ):
_SCREAMING_SNAKE_CASE : List[Any] = """fp16""" if fpaa else None
_SCREAMING_SNAKE_CASE : str = torch.floataa if fpaa else torch.floataa
_SCREAMING_SNAKE_CASE : Tuple = AutoencoderKL.from_pretrained(
__snake_case , subfolder="""vae""" , torch_dtype=__snake_case , revision=__snake_case , )
model.to(__snake_case ).eval()
return model
def UpperCAmelCase_ ( self , __snake_case=0 ):
if torch_device == "mps":
return torch.manual_seed(__snake_case )
return torch.Generator(device=__snake_case ).manual_seed(__snake_case )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : Any = self.get_sd_image(__snake_case )
_SCREAMING_SNAKE_CASE : Dict = self.get_generator(__snake_case )
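        # sample_posterior=True draws a latent sample from the encoder's posterior distribution,
        # so a seeded generator is needed to keep the test deterministic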
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__snake_case , generator=__snake_case , sample_posterior=__snake_case ).sample
assert sample.shape == image.shape
_SCREAMING_SNAKE_CASE : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__snake_case , __snake_case , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = self.get_sd_vae_model(fpaa=__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_image(__snake_case , fpaa=__snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_generator(__snake_case )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(__snake_case , generator=__snake_case , sample_posterior=__snake_case ).sample
assert sample.shape == image.shape
_SCREAMING_SNAKE_CASE : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_SCREAMING_SNAKE_CASE : str = torch.tensor(__snake_case )
assert torch_all_close(__snake_case , __snake_case , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(__snake_case )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[Any] = model(__snake_case ).sample
assert sample.shape == image.shape
_SCREAMING_SNAKE_CASE : Tuple = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_SCREAMING_SNAKE_CASE : int = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__snake_case , __snake_case , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(__snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Union[str, Any] = model.decode(__snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_SCREAMING_SNAKE_CASE : List[str] = sample[-1, -2:, :2, -2:].flatten().cpu()
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(__snake_case )
assert torch_all_close(__snake_case , __snake_case , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Any = self.get_sd_vae_model(fpaa=__snake_case )
_SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(__snake_case , shape=(3, 4, 64, 64) , fpaa=__snake_case )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Dict = model.decode(__snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_SCREAMING_SNAKE_CASE : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(__snake_case )
assert torch_all_close(__snake_case , __snake_case , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model(fpaa=__snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_sd_image(__snake_case , shape=(3, 4, 64, 64) , fpaa=__snake_case )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[int] = model.decode(__snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model.decode(__snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__snake_case , __snake_case , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_image(__snake_case , shape=(3, 4, 64, 64) )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Tuple = model.decode(__snake_case ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[Any] = model.decode(__snake_case ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__snake_case , __snake_case , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model()
_SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_image(__snake_case )
_SCREAMING_SNAKE_CASE : Dict = self.get_generator(__snake_case )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : Optional[Any] = model.encode(__snake_case ).latent_dist
_SCREAMING_SNAKE_CASE : str = dist.sample(generator=__snake_case )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_SCREAMING_SNAKE_CASE : Dict = sample[0, -1, -3:, -3:].flatten().cpu()
_SCREAMING_SNAKE_CASE : Any = torch.tensor(__snake_case )
_SCREAMING_SNAKE_CASE : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(__snake_case , __snake_case , atol=__snake_case )
| 200 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
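# count[t] holds the number of hollow square laminae that use exactly t tiles; a valid hole width
# must share the outer width's parity so that the border has uniform thickness.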
def solution( t_limit = 100_0000 , n_limit = 10 ):
    """simple docstring"""
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"{solution() = }")
| 200 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
UpperCamelCase = dataset
UpperCamelCase = process
UpperCamelCase = params
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : List[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.dataset[i]
UpperCamelCase = self.process(UpperCamelCase__ , **self.params )
return processed
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=None ):
"""simple docstring"""
UpperCamelCase = loader
UpperCamelCase = infer
UpperCamelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
UpperCamelCase = None
UpperCamelCase = loader_batch_size
# Internal bookkeeping
UpperCamelCase = None
UpperCamelCase = None
def __len__( self : Optional[int] ):
"""simple docstring"""
return len(self.loader )
def __iter__( self : Any ):
"""simple docstring"""
UpperCamelCase = iter(self.loader )
return self
def A ( self : Dict ):
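        # Slice the current element out of the stored batch and re-wrap it so it looks like a batch of size 1.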
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
UpperCamelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
UpperCamelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
UpperCamelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
UpperCamelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
UpperCamelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
UpperCamelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
UpperCamelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
UpperCamelCase = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def A ( self : Optional[Any] ):
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
UpperCamelCase = next(self.iterator )
UpperCamelCase = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
UpperCamelCase = processed
else:
UpperCamelCase = list(processed.keys() )[0]
UpperCamelCase = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = len(UpperCamelCase__ )
else:
UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCamelCase = observed_batch_size
# Setting internal index to unwrap the batch
UpperCamelCase = processed
UpperCamelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : int=None ):
"""simple docstring"""
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = iter(self.loader )
UpperCamelCase = None
return self
def A ( self : Optional[Any] ):
"""simple docstring"""
if self.subiterator is None:
UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
UpperCamelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
UpperCamelCase = next(self.subiterator )
return processed
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __iter__( self : Dict ):
"""simple docstring"""
UpperCamelCase = iter(self.loader )
return self
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
UpperCamelCase = self.loader_batch_item()
UpperCamelCase = item.pop('is_last' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
UpperCamelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
UpperCamelCase = processed
else:
UpperCamelCase = list(processed.keys() )[0]
UpperCamelCase = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = len(UpperCamelCase__ )
else:
UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCamelCase = observed_batch_size
UpperCamelCase = processed
UpperCamelCase = 0
while self._loader_batch_index < self.loader_batch_size:
UpperCamelCase = self.loader_batch_item()
UpperCamelCase = item.pop('is_last' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
UpperCamelCase = processed
UpperCamelCase = item.pop('is_last' )
accumulator.append(UpperCamelCase__ )
return accumulator
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : Dataset , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = dataset
UpperCamelCase = key
def __len__( self : Tuple ):
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : List[Any] , UpperCamelCase__ : str ):
"""simple docstring"""
return self.dataset[i][self.key]
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : List[str] , UpperCamelCase__ : Dataset , UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = dataset
UpperCamelCase = keya
UpperCamelCase = keya
def __len__( self : int ):
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Optional[Any] , UpperCamelCase__ : str ):
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 359 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __lowerCamelCase ( A__ ) -> Any:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = image.size
UpperCamelCase , UpperCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCamelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
UpperCamelCase = np.array(A__ ).astype(np.floataa ) / 255.0
UpperCamelCase = image[None].transpose(0 , 3 , 1 , 2 )
UpperCamelCase = torch.from_numpy(A__ )
return 2.0 * image - 1.0
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase__ : VQModel , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """simple docstring"""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}""")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
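
# Usage sketch (added for illustration; the checkpoint id follows the public
# LDM super-resolution example and is an assumption, not taken from this file):
#
#     pipe = LDMSuperResolutionPipeline.from_pretrained(
#         "CompVis/ldm-super-resolution-4x-openimages"
#     )
#     low_res = PIL.Image.open("low_res.png").convert("RGB")
#     upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
#     upscaled.save("upscaled.png")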
| 249 | 0 |
import math
def malus_law(initial_intensity, angle):
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
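    # Quick numeric check (added for illustration): Malus's law gives
    # I = I0 * cos^2(theta), so a 60 degree analyzer passes a quarter of the light.
    print(malus_law(100.0, 60))  # ~25.0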
| 287 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_xlm_roberta': [
        'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XLMRobertaConfig',
        'XLMRobertaOnnxConfig',
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
        'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMRobertaForCausalLM',
        'XLMRobertaForMaskedLM',
        'XLMRobertaForMultipleChoice',
        'XLMRobertaForQuestionAnswering',
        'XLMRobertaForSequenceClassification',
        'XLMRobertaForTokenClassification',
        'XLMRobertaModel',
        'XLMRobertaPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
        'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLMRobertaForCausalLM',
        'TFXLMRobertaForMaskedLM',
        'TFXLMRobertaForMultipleChoice',
        'TFXLMRobertaForQuestionAnswering',
        'TFXLMRobertaForSequenceClassification',
        'TFXLMRobertaForTokenClassification',
        'TFXLMRobertaModel',
        'TFXLMRobertaPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
        'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FlaxXLMRobertaForMaskedLM',
        'FlaxXLMRobertaForCausalLM',
        'FlaxXLMRobertaForMultipleChoice',
        'FlaxXLMRobertaForQuestionAnswering',
        'FlaxXLMRobertaForSequenceClassification',
        'FlaxXLMRobertaForTokenClassification',
        'FlaxXLMRobertaModel',
        'FlaxXLMRobertaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
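
# Note (added for illustration): with the lazy module installed in sys.modules,
# heavyweight submodules are imported only on first attribute access, e.g.
#     from transformers import XLMRobertaModel  # resolved lazily via _LazyModule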
| 249 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
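
# CLI usage sketch (added for illustration; fire exposes the function's
# arguments as flags, and the script name here is an assumption):
#     python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin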
| 302 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C
def carrier_concentration(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
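    # Quick check (added for illustration): pass 0 for the unknown quantity and
    # the function solves sigma = q * n * mu for it.
    print(carrier_concentration(conductivity=0, electron_conc=1600, mobility=200))
    # -> ('conductivity', 5.12672e-14)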
| 302 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swin'] = [
        'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwinForImageClassification',
        'SwinForMaskedImageModeling',
        'SwinModel',
        'SwinPreTrainedModel',
        'SwinBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_swin'] = [
        'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSwinForImageClassification',
        'TFSwinForMaskedImageModeling',
        'TFSwinModel',
        'TFSwinPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 |
def generate_large_matrix() -> list[list[int]]:
    '''simple docstring'''
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    '''simple docstring'''
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    '''simple docstring'''
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    '''simple docstring'''
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    '''simple docstring'''
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    '''simple docstring'''
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit

    print('''Running benchmarks''')
    setup = (
        '''from __main__ import count_negatives_binary_search, '''
        '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
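    # Cross-check (added for illustration): all three strategies must agree on
    # the small fixture grids defined above.
    for g in test_grids[:-1]:  # skip the large generated grid to keep this fast
        assert (
            count_negatives_binary_search(grid=g)
            == count_negatives_brute_force(grid=g)
            == count_negatives_brute_force_with_break(grid=g)
        )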
| 328 | 0 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """simple docstring"""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """simple docstring"""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    """simple docstring"""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    """simple docstring"""
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print('theta: ', theta)  # printing the theta i.e our weights vector


def predict_prob(x):
    """simple docstring"""
    return sigmoid_function(np.dot(x, theta))  # predicting the value of probability from the logistic regression algorithm


plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
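
# Sanity check (added for illustration): the sigmoid must map 0 to 0.5 and stay
# strictly inside (0, 1), which the cross-entropy in cost_function relies on.
assert abs(sigmoid_function(0.0) - 0.5) < 1e-12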
| 369 |
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """simple docstring"""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """simple docstring"""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """simple docstring"""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("""Q""", end=""" """)
            else:
                print(""".""", end=""" """)
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
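
# Known result (added for illustration): the 8x8 board has exactly 92 solutions,
# so the count printed above should be 92.
assert len(solution) == 92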
| 187 | 0 |
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta, accuracy=30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
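    # Cross-check (added for illustration): with the default 30 terms the
    # truncated series should match math.sin/math.cos to within float noise.
    from math import cos, sin

    assert abs(maclaurin_sin(1.0) - sin(1.0)) < 1e-10
    assert abs(maclaurin_cos(1.0) - cos(1.0)) < 1e-10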
| 171 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    """good first issue""",
    """feature request""",
    """wip""",
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""])
    repo = g.get_repo("""huggingface/accelerate""")
    open_issues = repo.get_issues(state="""open""")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
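    # Usage sketch (added for illustration; the script name and token scope are
    # assumptions): run as a scheduled CI job with a token that can edit issues,
    #     GITHUB_TOKEN=<token> python stale.py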
| 171 | 1 |
"""simple docstring"""
def binary_and(a: int, b: int):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
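    # Examples (added for illustration): bitwise AND on the padded binary
    # string representations of both operands.
    print(binary_and(25, 32))  # 0b000000
    print(binary_and(37, 50))  # 0b100000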
| 357 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintLegacyPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    @property
    def gpu_provider(self):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference(self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 241 | 0 |