"""Conversation state holder and ConversationalPipeline for multi-turn generation."""
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """Holds a conversation: past user inputs, generated responses, and the not-yet-processed user input."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue `text` as the next user input; `overwrite=True` replaces a pending unprocessed input."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the unprocessed user input into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs over the whole conversation, ending with any pending user input."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline expects a Conversation as input")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {conversation.uuid} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
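# Minimal usage sketch (the checkpoint name below is illustrative, not part of this module):
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Hi, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])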
"""Tests for the Flax ViT models."""
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
"""Scrape the current stock price of a symbol from Yahoo Finance."""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
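# Note: this relies on a specific Yahoo Finance CSS class, so the selector above
# is tied to the page markup at the time of writing and may break when the site changes.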
"""SEW model configuration."""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
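# Example: with the default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
# inputs_to_logits_ratio == 5 * 2**6 == 320 input samples per output frame.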
"""Tests for the Funnel tokenizers."""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
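# Funnel assigns token type id 2 to the [CLS] token (BERT-style tokenizers use 0),
# which is what the [2] prefix in the expected token_type_ids above checks.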
"""Find the denominator below a bound whose unit fraction has the longest recurring decimal cycle."""


def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            # A repeated remainder means the decimal expansion has started cycling.
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
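# Example: among denominators up to 10, 1/7 has the longest recurring cycle
# (0.(142857), length 6), so solution(1, 10) returns 7.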
# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Compute the rank of a matrix via Gaussian elimination."""


def rank_of_matrix(matrix: list) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
                # Reduce the row pointer by one to stay on the same row
                row -= 1

    return rank
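# Example: rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1, since the second
# row is a scalar multiple of the first.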
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Tests for the Chinese-CLIP image processor."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, numpy arrays (numpify=True) or PyTorch tensors (torchify=True)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
"""Check whether a number is automorphic, i.e. its square ends in the number itself."""


def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square, one digit at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
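# Example: 76 is automorphic because 76**2 == 5776 ends in 76, while 7 is not
# because 7**2 == 49 does not end in 7.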
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Utilities for generating dummy datasets and timing Arrow writes."""
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: the wrapped function returns its wall-clock duration in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
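# Usage sketch (the feature spec below is illustrative, not part of this module):
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)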
"""Gradient descent for a linear hypothesis function on a toy dataset."""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase="train" ) -> Union[str, Any]:
'''simple docstring'''
return calculate_hypothesis_value(__UpperCAmelCase , __UpperCAmelCase ) - output(
__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
for i in range(len(__UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def summation_of_cost_derivative(index, end=m):
    """Sum of (error * input) over the training set; index == -1 sums plain errors for the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
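# Batch gradient descent update used below:
#   theta_i <- theta_i - LEARNING_RATE * (1/m) * sum_j (h(x_j) - y_j) * x_j[i]
# where the bias term (i == 0, passed as index -1) uses 1 in place of x_j[i].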
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
"""Pytest fixtures for testing against the Hugging Face Hub CI endpoint."""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a = "__DUMMY_TRANSFORMERS_USER__"
a = "Dummy User"
a = "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"
a = "https://hub-ci.huggingface.co"
a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
a = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
"""Find the area and surface area of various geometric shapes."""
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
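# The torus formula is (2 * pi * torus_radius) * (2 * pi * tube_radius): the tube's
# circumference swept along the center-line circle, i.e. 4 * pi**2 * R * r.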
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Area of a triangle from its three sides, using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
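# A regular n-gon of side s splits into n isosceles triangles with apothem
# s / (2 * tan(pi / n)), which yields the n * s**2 / (4 * tan(pi / n)) formula above.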
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
"""DETA model configuration."""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""Count perimeters up to a limit that form exactly one integer right triangle (Project Euler 75)."""
from collections import defaultdict
from math import gcd


def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
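# Euclid's formula: for coprime m > n of opposite parity, a = m**2 - n**2,
# b = 2*m*n, c = m**2 + n**2 is a primitive right triangle with perimeter
# 2*m*(m + n); stepping through its multiples covers the non-primitive triangles.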
if __name__ == "__main__":
    print(f"{solution() = }")
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class __a ( nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        self.conv = nn.Conv(
            self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ):
        '''simple docstring'''
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class __a ( nn.Module ):
    out_channels : int
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        self.conv = nn.Conv(
            self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ):
        '''simple docstring'''
        hidden_states = self.conv(hidden_states )
        return hidden_states
class __a ( nn.Module ):
    in_channels : int
    out_channels : int = None
    dropout_prob : float = 0.0
    use_nin_shortcut : bool = None
    dtype : jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
        self.conv1 = nn.Conv(
            out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
        self.time_emb_proj = nn.Dense(out_channels ,dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ,temb ,deterministic=True ):
        '''simple docstring'''
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb ,1 ) ,1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states ,deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
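# Illustrative shape check (assumption: these blocks mirror diffusers' Flax upsample,
# downsample and resnet blocks, which use NHWC layout): the nearest-neighbour resize in
# the first block doubles height and width before its 3x3 conv, so a (1, 32, 32, C)
# input becomes (1, 64, 64, out_channels); the strided conv in the second block halves
# the spatial dims instead.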
| 13 | 1 |
'''simple docstring'''
def manhattan_distance( point_a ,point_b ) -> float:
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b ) for a, b in zip(point_a ,point_b ) ) )
def _validate_point( point ) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point ,list ):
            for item in point:
                if not isinstance(item ,(int, float) ):
                    msg = (
                        """Expected a list of numbers as input, found """
                        f"""{type(item ).__name__}"""
                    )
                    raise TypeError(msg )
        else:
            msg = f"""Expected a list of numbers as input, found {type(point ).__name__}"""
            raise TypeError(msg )
    else:
        raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner( point_a ,point_b ) -> float:
    '''simple docstring'''
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y ) for x, y in zip(point_a ,point_b ) ) )
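# Worked example (illustrative): for point_a = [1, 1] and point_b = [4, 5], both
# implementations return |1 - 4| + |1 - 5| = 7.0.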
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__( self ):
        '''simple docstring'''
        self.node_position = []
    def get_position( self ,vertex ):
        '''simple docstring'''
        return self.node_position[vertex]
    def set_position( self ,vertex ,pos ):
        '''simple docstring'''
        self.node_position[vertex] = pos
    def top_to_bottom( self ,heap ,start ,size ,positions ):
        '''simple docstring'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] ,self.get_position(positions[start] ) )
                self.set_position(positions[start] ,temp )
                self.top_to_bottom(heap ,smallest_child ,size ,positions )
    def bottom_to_top( self ,val ,index ,heap ,position ):
        '''simple docstring'''
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] ,index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp ,index )
                break
            index = parent
        else:
            heap[index] = val
            position[index] = temp
            self.set_position(temp ,0 )
    def heapify( self ,heap ,positions ):
        '''simple docstring'''
        start = len(heap ) // 2 - 1
        for i in range(start ,-1 ,-1 ):
            self.top_to_bottom(heap ,i ,len(heap ) ,positions )
    def delete_minimum( self ,heap ,positions ):
        '''simple docstring'''
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap ,0 ,len(heap ) ,positions )
        return temp
def prisms_algorithm( adjacency_list ):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv ,positions )
    for _ in range(1 ,len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv ,positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance ,heap.get_position(neighbor ) ,distance_tv ,positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
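    # Illustrative session (assumed I/O format: one "u v weight" triple per edge line):
    # entering 3 edges "0 1 1", "1 2 2" and "0 2 3" prints [(0, 1), (1, 2)], the
    # minimum spanning tree of total weight 3.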
| 13 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __a ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        vocab = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            """unk_token""": """<unk>""",
            """bos_token""": """<s>""",
            """eos_token""": """</s>""",
        }
        feature_extractor_map = {
            """feature_size""": 1,
            """padding_value""": 0.0,
            """sampling_rate""": 1_6000,
            """return_attention_mask""": False,
            """do_normalize""": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
        self.feature_extraction_file = os.path.join(self.tmpdirname ,FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + """\n""" )
        # load decoder from hub
        self.decoder_name = """hf-internal-testing/ngram-beam-search-decoder"""
    def get_tokenizer( self ,**kwargs_init ):
        '''simple docstring'''
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_feature_extractor( self ,**kwargs ):
        '''simple docstring'''
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**kwargs )
    def get_decoder( self ,**kwargs ):
        '''simple docstring'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase ,feature_extractor=lowerCamelCase ,decoder=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(lowerCamelCase ,"""include""" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCamelCase ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase ,feature_extractor=lowerCamelCase ,decoder=lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_list((3, 1000) )
__SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(lowerCamelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase ,feature_extractor=lowerCamelCase ,decoder=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """This is a test string"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def _get_dummy_logits( self ,shape=(2, 10, 16) ,seed=77 ):
        '''simple docstring'''
        np.random.seed(seed )
        return np.random.rand(*shape )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase ,feature_extractor=lowerCamelCase ,decoder=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
__SCREAMING_SNAKE_CASE = processor.decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = decoder.decode_beams(lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase ,feature_extractor=lowerCamelCase ,decoder=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase )
else:
with get_context(lowerCamelCase ).Pool() as pool:
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
with get_context("""fork""" ).Pool() as p:
__SCREAMING_SNAKE_CASE = decoder.decode_beams_batch(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCamelCase ,decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text )
self.assertListEqual(lowerCamelCase ,decoded_processor.logit_score )
self.assertListEqual(lowerCamelCase ,decoded_processor.lm_score )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase ,feature_extractor=lowerCamelCase ,decoder=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
__SCREAMING_SNAKE_CASE = 15
__SCREAMING_SNAKE_CASE = -20.0
__SCREAMING_SNAKE_CASE = -4.0
__SCREAMING_SNAKE_CASE = processor.batch_decode(
lowerCamelCase ,beam_width=lowerCamelCase ,beam_prune_logp=lowerCamelCase ,token_min_logp=lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = decoded_processor_out.text
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
with get_context("""fork""" ).Pool() as pool:
__SCREAMING_SNAKE_CASE = decoder.decode_beams_batch(
lowerCamelCase ,lowerCamelCase ,beam_width=lowerCamelCase ,beam_prune_logp=lowerCamelCase ,token_min_logp=lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = [d[0][0] for d in decoded_decoder_out]
__SCREAMING_SNAKE_CASE = [d[0][2] for d in decoded_decoder_out]
__SCREAMING_SNAKE_CASE = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,lowerCamelCase )
self.assertTrue(np.array_equal(lowerCamelCase ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] ,lowerCamelCase ,atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCamelCase ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] ,lowerCamelCase ,atol=1E-3 ) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase ,feature_extractor=lowerCamelCase ,decoder=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
__SCREAMING_SNAKE_CASE = 2.0
__SCREAMING_SNAKE_CASE = 5.0
__SCREAMING_SNAKE_CASE = -20.0
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = processor.batch_decode(
lowerCamelCase ,alpha=lowerCamelCase ,beta=lowerCamelCase ,unk_score_offset=lowerCamelCase ,lm_score_boundary=lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = decoded_processor_out.text
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
decoder.reset_params(
alpha=lowerCamelCase ,beta=lowerCamelCase ,unk_score_offset=lowerCamelCase ,lm_score_boundary=lowerCamelCase ,)
with get_context("""fork""" ).Pool() as pool:
__SCREAMING_SNAKE_CASE = decoder.decode_beams_batch(
lowerCamelCase ,lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-20.0 )
self.assertEqual(lm_model.score_boundary ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key]
__SCREAMING_SNAKE_CASE = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__SCREAMING_SNAKE_CASE = os.listdir(lowerCamelCase )
__SCREAMING_SNAKE_CASE = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key]
__SCREAMING_SNAKE_CASE = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__SCREAMING_SNAKE_CASE = os.listdir(lowerCamelCase )
__SCREAMING_SNAKE_CASE = os.listdir(lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__SCREAMING_SNAKE_CASE = floats_list((3, 1000) )
__SCREAMING_SNAKE_CASE = processor_wavaveca(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor_auto(lowerCamelCase ,return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1E-2 )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
__SCREAMING_SNAKE_CASE = processor_wavaveca.batch_decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = processor_auto.batch_decode(lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=lowerCamelCase ,feature_extractor=lowerCamelCase ,decoder=lowerCamelCase )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
    @staticmethod
    def get_from_offsets( offsets ,key ):
        '''simple docstring'''
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()[0]
__SCREAMING_SNAKE_CASE = processor.decode(lowerCamelCase ,output_word_offsets=lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCamelCase ,lowerCamelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase ,output_word_offsets=lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCamelCase ,lowerCamelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(lowerCamelCase ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
import torch
__SCREAMING_SNAKE_CASE = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=lowerCamelCase )
__SCREAMING_SNAKE_CASE = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=1_6000 ) )
__SCREAMING_SNAKE_CASE = iter(lowerCamelCase )
__SCREAMING_SNAKE_CASE = next(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
__SCREAMING_SNAKE_CASE = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__SCREAMING_SNAKE_CASE = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ).logits.cpu().numpy()
__SCREAMING_SNAKE_CASE = processor.decode(logits[0] ,output_word_offsets=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__SCREAMING_SNAKE_CASE = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__SCREAMING_SNAKE_CASE = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(lowerCamelCase ,"""word""" ) ) ,lowerCamelCase )
self.assertEqual(""" """.join(self.get_from_offsets(lowerCamelCase ,"""word""" ) ) ,output.text )
# output times
__SCREAMING_SNAKE_CASE = torch.tensor(self.get_from_offsets(lowerCamelCase ,"""start_time""" ) )
__SCREAMING_SNAKE_CASE = torch.tensor(self.get_from_offsets(lowerCamelCase ,"""end_time""" ) )
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
__SCREAMING_SNAKE_CASE = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=0.01 ) )
self.assertTrue(torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=0.01 ) )
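# Illustrative invocation sketch (assumption: standard unittest/pytest discovery for
# transformers test modules; the file path below is a placeholder):
#
#   python -m pytest path/to/this_test_file.py
#
# The @slow, @require_torch and @require_torchaudio decorators above gate the heavy
# integration test at the end of the class.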
| 13 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt
        encoding = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER ) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
                    WIN_CH_BUFFER.append(chx )
                    if ord(chx ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    ch = chr(KEYMAP["""esc"""] )
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding )
        else:
            ch = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd ,termios.TCSADRAIN ,old_settings )
    return ch
def get_character():
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char ) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo ) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 13 | 1 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
a = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
        # Model for Image-classification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class FlaxAutoModelForCausalLM( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class FlaxAutoModelForMaskedLM( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class FlaxAutoModelForSeq2SeqLM( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class FlaxAutoModelForSequenceClassification( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class FlaxAutoModelForQuestionAnswering( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class FlaxAutoModelForTokenClassification( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class FlaxAutoModelForMultipleChoice( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class FlaxAutoModelForNextSentencePrediction( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class FlaxAutoModelForImageClassification( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)
class FlaxAutoModelForVision2Seq( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
class FlaxAutoModelForSpeechSeq2Seq( _BaseAutoModelClass ):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
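# Illustrative usage sketch (assumption: the standard transformers auto-class API):
#
#   model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#
# resolves the "bert" model type through the mappings above to
# FlaxBertForSequenceClassification.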
| 13 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left( sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right( sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search( sorted_collection , item ) -> int | None:
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection , item ) -> int | None:
    '''simple docstring'''
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection , item , left , right ) -> int | None:
    '''simple docstring'''
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
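    # Worked examples (illustrative): binary_search([0, 5, 7, 10, 15], 7) returns 2,
    # bisect_left([0, 5, 5, 7], 5) returns 1, and bisect_right([0, 5, 5, 7], 5) returns 3.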
| 13 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class __a ( DiffusionPipeline ):
    def __init__( self ,vae: AutoencoderKL ,text_encoder: CLIPTextModel ,tokenizer: CLIPTokenizer ,unet: UNet2DConditionModel ,scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,safety_checker: StableDiffusionSafetyChecker ,feature_extractor: CLIPImageProcessor ,):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vae=vae ,text_encoder=text_encoder ,tokenizer=tokenizer ,unet=unet ,scheduler=scheduler ,safety_checker=safety_checker ,feature_extractor=feature_extractor ,)
    def enable_attention_slicing( self ,slice_size: Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self ,prompt: Union[str, List[str]] ,height: int = 512 ,width: int = 512 ,num_inference_steps: int = 50 ,guidance_scale: float = 7.5 ,negative_prompt: Optional[Union[str, List[str]]] = None ,num_images_per_prompt: Optional[int] = 1 ,eta: float = 0.0 ,generator: Optional[torch.Generator] = None ,latents: Optional[torch.FloatTensor] = None ,output_type: Optional[str] = "pil" ,return_dict: bool = True ,callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,callback_steps: int = 1 ,text_embeddings: Optional[torch.FloatTensor] = None ,**kwargs ,):
        '''simple docstring'''
        if isinstance(prompt ,str ):
            batch_size = 1
        elif isinstance(prompt ,list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps ,int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps )}.""" )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,return_tensors="""pt""" ,)
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 ,num_images_per_prompt ,1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt ,seq_len ,-1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""]
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    f""" {type(prompt )}.""" )
            elif isinstance(negative_prompt ,str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`.""" )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens ,padding="""max_length""" ,max_length=max_length ,truncation=True ,return_tensors="""pt""" ,)
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size ,num_images_per_prompt ,1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt ,seq_len ,-1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference ,generator=generator ,device="""cpu""" ,dtype=latents_dtype ).to(self.device )
                latents = torch.randn(latents_shape ,generator=generator ,device="""cpu""" ,dtype=latents_dtype ).to(
                    self.device )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference ,generator=generator ,device=self.device ,dtype=latents_dtype )
                latents = torch.randn(latents_shape ,generator=generator ,device=self.device ,dtype=latents_dtype )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents_reference = latents_reference.to(self.device )
            latents = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx  # paste offsets into the target latent (local names assumed)
        ty = 0 if dy < 0 else dy
        dx = max(-dx ,0 )
        dy = max(-dy ,0 )
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["""eta"""] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input ,t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input ,t ,encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred ,t ,latents ,**extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i ,t ,latents )
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 ,1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) ,return_tensors="""pt""" ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image ,nsfw_content_detected=has_nsfw_concept )
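# Worked example of the re-centering logic above (illustrative): with a 64x64 reference
# latent and a 96x96 target latent (a 768x768 image), dx = dy = (96 - 64) // 2 = 16, so
# the reference noise lands in latents[:, :, 16:80, 16:80] and the same seed keeps a
# similar central composition across output sizes.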
| 13 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class SchedulerType( Enum ):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule( optimizer: Optimizer , last_epoch: int = -1 ):
    '''simple docstring'''
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
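# Worked example (illustrative): step_rules "1:0.1,10:0.5,1.0" yields
# rules_dict = {1: 0.1, 10: 0.5} and last_lr_multiple = 1.0, so the LR multiplier is
# 0.1 for steps < 1, 0.5 for steps < 10, and 1.0 from step 10 onwards.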
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
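# Shape of the decay above (illustrative): after warmup the multiplier is
# 0.5 * (1 + cos(pi * 2 * num_cycles * progress)); with the default num_cycles = 0.5
# it falls smoothly from 1 at progress = 0 to 0 at progress = 1.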
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
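# Hypothetical usage (the dispatcher above corresponds to `get_scheduler` in the
# original source; `model` is assumed to exist elsewhere):
# optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
# lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
# # call lr_scheduler.step() once per optimizer step during training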
| 13 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = torch.device("cpu")
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = dct.pop(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = val
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for k in state_dict.keys():
__SCREAMING_SNAKE_CASE = k
if ".pwconv" in k:
__SCREAMING_SNAKE_CASE = k_new.replace(""".pwconv""" , """.point_wise_conv""" )
if ".dwconv" in k:
__SCREAMING_SNAKE_CASE = k_new.replace(""".dwconv""" , """.depth_wise_conv""" )
if ".Proj." in k:
__SCREAMING_SNAKE_CASE = k_new.replace(""".Proj.""" , """.proj.""" )
if "patch_embed" in k_new:
__SCREAMING_SNAKE_CASE = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" )
if "network" in k_new:
__SCREAMING_SNAKE_CASE = k_new.split(""".""" )
if ls[2].isdigit():
__SCREAMING_SNAKE_CASE = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] )
else:
__SCREAMING_SNAKE_CASE = k_new.replace("""network""" , """swiftformer.encoder.network""" )
rename_keys.append((k, k_new) )
return rename_keys
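# Illustrative renaming (checkpoint key assumed): per the rewrites above, a key such as
#   "patch_embed.0.weight" -> "swiftformer.patch_embed.patch_embedding.0.weight"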
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__SCREAMING_SNAKE_CASE = 1000
__SCREAMING_SNAKE_CASE = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__SCREAMING_SNAKE_CASE = [3, 3, 6, 4]
__SCREAMING_SNAKE_CASE = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__SCREAMING_SNAKE_CASE = [3, 3, 9, 6]
__SCREAMING_SNAKE_CASE = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__SCREAMING_SNAKE_CASE = [4, 3, 10, 5]
__SCREAMING_SNAKE_CASE = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__SCREAMING_SNAKE_CASE = [4, 4, 12, 6]
__SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
__SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location="""cpu""" , check_hash=__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = torch.load(__UpperCAmelCase , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = checkpoint
__SCREAMING_SNAKE_CASE = create_rename_keys(__UpperCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
__SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(__UpperCAmelCase ).eval()
hf_model.load_state_dict(__UpperCAmelCase )
# prepare test inputs
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
__SCREAMING_SNAKE_CASE = processor(images=__UpperCAmelCase , return_tensors="""pt""" )
# compare outputs from both models
__SCREAMING_SNAKE_CASE = get_expected_output(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , __UpperCAmelCase , atol=1e-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
a = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 13 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
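# Note (illustrative, not in the original): with _LazyModule the heavy submodules are
# imported only on first attribute access, e.g.
#   import transformers.models.sew as sew  # cheap: no torch-dependent modules loaded yet
#   cls = sew.SEWModel                     # first access triggers the real modeling import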
| 13 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
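# Quick sanity check (added): 121 reads the same reversed, 123 does not.
assert __magic_name__(121) and not __magic_name__(123)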
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 13 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
'''simple docstring'''
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.max(_outputs , axis=-1 , keepdims=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__UpperCAmelCase )
class __a ( _snake_case ):
__UpperCamelCase : Any = 'sigmoid'
__UpperCamelCase : Tuple = 'softmax'
__UpperCamelCase : Optional[Any] = 'none'
@add_end_docstrings(
_snake_case, R'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ', )
class __a ( _snake_case ):
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : List[str] = ClassificationFunction.NONE
def __init__( self : str ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Any="" ,**lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tokenizer_kwargs
__SCREAMING_SNAKE_CASE = {}
if hasattr(self.model.config ,"""return_all_scores""" ) and return_all_scores is None:
__SCREAMING_SNAKE_CASE = self.model.config.return_all_scores
if isinstance(lowerCamelCase ,lowerCamelCase ) or top_k is None:
__SCREAMING_SNAKE_CASE = top_k
__SCREAMING_SNAKE_CASE = False
elif return_all_scores is not None:
warnings.warn(
"""`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" ,lowerCamelCase ,)
if return_all_scores:
__SCREAMING_SNAKE_CASE = None
else:
__SCREAMING_SNAKE_CASE = 1
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
__SCREAMING_SNAKE_CASE = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] ,*lowerCamelCase : Union[str, Any] ,**lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().__call__(*lowerCamelCase ,**lowerCamelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
__SCREAMING_SNAKE_CASE = """top_k""" not in kwargs
if isinstance(args[0] ,lowerCamelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : str ,**lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.framework
if isinstance(lowerCamelCase ,lowerCamelCase ):
return self.tokenizer(**lowerCamelCase ,return_tensors=lowerCamelCase ,**lowerCamelCase )
elif isinstance(lowerCamelCase ,lowerCamelCase ) and len(lowerCamelCase ) == 1 and isinstance(inputs[0] ,lowerCamelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=lowerCamelCase ,**lowerCamelCase )
elif isinstance(lowerCamelCase ,lowerCamelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(lowerCamelCase ,return_tensors=lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
return self.model(**lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Dict ,lowerCamelCase : List[str]=None ,lowerCamelCase : str=1 ,lowerCamelCase : str=True ):
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
__SCREAMING_SNAKE_CASE = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
__SCREAMING_SNAKE_CASE = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config ,"""function_to_apply""" ) and function_to_apply is None:
__SCREAMING_SNAKE_CASE = self.model.config.function_to_apply
else:
__SCREAMING_SNAKE_CASE = ClassificationFunction.NONE
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
__SCREAMING_SNAKE_CASE = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
__SCREAMING_SNAKE_CASE = sigmoid(lowerCamelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
__SCREAMING_SNAKE_CASE = softmax(lowerCamelCase )
elif function_to_apply == ClassificationFunction.NONE:
__SCREAMING_SNAKE_CASE = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
__SCREAMING_SNAKE_CASE = [
{"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(lowerCamelCase )
]
if not _legacy:
dict_scores.sort(key=lambda lowerCamelCase : x["score"] ,reverse=lowerCamelCase )
if top_k is not None:
__SCREAMING_SNAKE_CASE = dict_scores[:top_k]
return dict_scores
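# Numeric illustration (added): the stable softmax used above, on a single logits row.
import numpy as np
_logits = np.array([2.0, 0.5, -1.0])
_shifted = np.exp(_logits - _logits.max())
_probs = _shifted / _shifted.sum()  # sums to 1.0; the largest logit gets the top score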
| 13 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a = list[list[float | int]]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for row in range(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = matrix[row][col]
__SCREAMING_SNAKE_CASE = vector[row][0]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
__SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
__SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCAmelCase ):
for row in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase )
]
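# Hand-checked example (illustrative): solve([[2.0, 1.0], [1.0, 3.0]], [[3.0], [5.0]])
# returns [[0.8], [1.4]], since 2*0.8 + 1*1.4 = 3 and 1*0.8 + 3*1.4 = 5.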
def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for x_val, y_val in enumerate(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
__SCREAMING_SNAKE_CASE = y_val
__SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase )
def interpolated_func(__UpperCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__UpperCAmelCase ) )
return interpolated_func
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )]
__SCREAMING_SNAKE_CASE = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for poly in polynomials:
__SCREAMING_SNAKE_CASE = 1
while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ):
x_val += 1
ret += poly(__UpperCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __a ( _snake_case ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(lowerCamelCase ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda lowerCamelCase : -x[0] )
]
return result
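# Hypothetical usage (model checkpoint assumed, not taken from this file):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")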
| 13 | 1 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
a = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class __a :
__UpperCamelCase : str
__UpperCamelCase : Optional[str] = None
__UpperCamelCase : Optional[Union[str, int]] = None
__UpperCamelCase : Optional[Union[str, int]] = None
__UpperCamelCase : Optional[Union[str, int]] = None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _str_to_version_tuple(self.version_str )
def __repr__( self : Tuple ):
'''simple docstring'''
return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return self.major, self.minor, self.patch
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Any ):
'''simple docstring'''
if isinstance(lowerCamelCase ,lowerCamelCase ):
return Version(lowerCamelCase )
elif isinstance(lowerCamelCase ,lowerCamelCase ):
return other
raise TypeError(f"""{other} (type {type(lowerCamelCase )}) cannot be compared to version.""" )
def __eq__( self : int ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
try:
__SCREAMING_SNAKE_CASE = self._validate_operand(lowerCamelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Any ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self._validate_operand(lowerCamelCase )
return self.tuple < other.tuple
def __hash__( self : List[Any] ):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return self.version_str
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = _VERSION_REG.match(__UpperCAmelCase )
if not res:
raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
return tuple(int(__UpperCAmelCase ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return ".".join(str(__UpperCAmelCase ) for v in version_tuple )
| 13 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
require_version(deps[pkg] , __UpperCAmelCase )
| 13 | 1 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def __magic_name__ ( __UpperCAmelCase ) -> datetime:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = year % 19
__SCREAMING_SNAKE_CASE = year % 4
__SCREAMING_SNAKE_CASE = year % 7
__SCREAMING_SNAKE_CASE = math.floor(year / 100 )
__SCREAMING_SNAKE_CASE = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__SCREAMING_SNAKE_CASE = leap_day_inhibits / 4
__SCREAMING_SNAKE_CASE = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__SCREAMING_SNAKE_CASE = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__SCREAMING_SNAKE_CASE = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__SCREAMING_SNAKE_CASE = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(__UpperCAmelCase , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(__UpperCAmelCase , 4 , 18 )
else:
return datetime(__UpperCAmelCase , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
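# Spot check (illustrative): gauss_easter(2023) should return datetime(2023, 4, 9),
# matching Easter Sunday 2023.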
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
a = "will be" if year > datetime.now().year else "was"
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 13 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
a = logging.getLogger(__name__)
@dataclass
class __a :
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
__UpperCamelCase : int = field(
default=1024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__UpperCamelCase : str = field(
default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
__UpperCamelCase : str = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
__SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__SCREAMING_SNAKE_CASE = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
__SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
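# Illustrative table_text layout (inferred from the parser above): rows split on "\n",
# cells on "#", with the first row holding the column names, e.g.
#   "name#age\nalice#30\nbob#25" -> DataFrame(columns=["name", "age"]) with two rows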
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` column because it contains -1 and Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
a = 10
def __magic_name__ ( __UpperCAmelCase ) -> list[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = max(__UpperCAmelCase )
while placement <= max_digit:
# declare and initialize empty buckets
__SCREAMING_SNAKE_CASE = [[] for _ in range(__UpperCAmelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
__SCREAMING_SNAKE_CASE = int((i / placement) % RADIX )
buckets[tmp].append(__UpperCAmelCase )
# put each buckets' contents into list_of_ints
__SCREAMING_SNAKE_CASE = 0
for b in range(__UpperCAmelCase ):
for i in buckets[b]:
__SCREAMING_SNAKE_CASE = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
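# Illustrative trace (RADIX = 10): [3, 21, 15]
#   placement 1  (units digit) -> [21, 3, 15]
#   placement 10 (tens digit)  -> [3, 15, 21]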
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
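# Note: with this lazy pattern, `import transformers.models.convbert` stays cheap;
# the torch/TF submodules listed in `_import_structure` are only imported the first
# time one of their attributes is actually accessed.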
| 13 |
"""Fetch the citation count of a paper from Google Scholar."""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on the Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13 | 1 |
"""Flax upsampling/downsampling and ResNet blocks (NHWC layout)."""
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbour 2x spatial upsampling, followed by a 3x3 conv
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest"
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the timestep embedding and broadcast it over the spatial dims
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
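# Shape sketch (comment-only; class names follow the definitions above, values
# are illustrative):
# x = jnp.ones((1, 16, 16, 64))
# up = FlaxUpsample2D(out_channels=64)
# params = up.init(jax.random.PRNGKey(0), x)
# up.apply(params, x).shape  # -> (1, 32, 32, 64); FlaxDownsample2D would give (1, 8, 8, 64)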
| 13 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
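# Export sketch (comment-only; the checkpoint id is illustrative):
# config = CamembertConfig.from_pretrained("camembert-base")
# onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
# list(onnx_config.inputs)  # -> ["input_ids", "attention_mask"]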
| 13 | 1 |
"""Project Euler 191: count 30-day "prize strings" with fewer than three
consecutive late days and no more than one absence."""
cache: dict = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # a string with two absences or three consecutive lates is not a prize string
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
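# For the default 30-day period this prints 1918080160, the published answer to
# Project Euler problem 191 (stated here for reference, not asserted by the code).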
| 13 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __a ( unittest.TestCase ):
def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ):
return model(pixel_values=lowerCamelCase ,**lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase )
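# Note: `model_jitted` above is traced once per input shape; the JIT-enabled and
# JIT-disabled subtests only compare output *shapes*, which is the invariant that
# is cheap to check on both code paths.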
| 13 | 1 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
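# Typical invocation (flag names assumed to match PretokenizationArguments; the
# repo ids are illustrative):
# python pretokenizing.py --tokenizer_dir codeparrot/codeparrot \
#     --dataset_name codeparrot/codeparrot-clean --tokenized_data_repo my-org/tokenized-data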
| 13 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"
def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
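# The property above multiplies the conv strides together; with the default
# conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) that is 5 * 2**6 = 320,
# i.e. one encoder output frame per 320 input samples.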
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __a :
__UpperCamelCase : Optional[Any] = XGLMConfig
__UpperCamelCase : Optional[Any] = {}
__UpperCamelCase : Any = 'gelu'
def __init__( self : Optional[Any] ,lowerCamelCase : List[str] ,lowerCamelCase : List[str]=14 ,lowerCamelCase : int=7 ,lowerCamelCase : Tuple=True ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : Union[str, Any]=True ,lowerCamelCase : Tuple=99 ,lowerCamelCase : List[Any]=32 ,lowerCamelCase : List[str]=2 ,lowerCamelCase : Union[str, Any]=4 ,lowerCamelCase : List[str]=37 ,lowerCamelCase : List[str]="gelu" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Optional[Any]=512 ,lowerCamelCase : int=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = ffn_dim
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 1
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) ,clip_value_min=0 ,clip_value_max=3 )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=lowerCamelCase ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=lowerCamelCase ,)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class __a ( _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__UpperCamelCase : Optional[int] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__UpperCamelCase : Tuple = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : List[str] = False
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFXGLMModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,n_embd=37 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TFXGLMModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __a ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : str ,lowerCamelCase : str=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        __SCREAMING_SNAKE_CASE = tf.convert_to_tensor([[2, 268, 9865]] ,dtype=tf.int32 ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__SCREAMING_SNAKE_CASE = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
# fmt: on
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,do_sample=lowerCamelCase ,num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() ,lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
__SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
__SCREAMING_SNAKE_CASE = tokenizer("""Today is a nice day and""" ,return_tensors="""tf""" )
__SCREAMING_SNAKE_CASE = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,do_sample=lowerCamelCase ,seed=[7, 0] )
__SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] ,skip_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(lowerCamelCase ,lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
__SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
__SCREAMING_SNAKE_CASE = """left"""
# use different length sentences to test batching
__SCREAMING_SNAKE_CASE = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ,return_tensors="""tf""" ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = inputs["""input_ids"""]
__SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCamelCase ,attention_mask=inputs["""attention_mask"""] ,max_new_tokens=12 )
__SCREAMING_SNAKE_CASE = tokenizer(sentences[0] ,return_tensors="""tf""" ).input_ids
__SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCamelCase ,max_new_tokens=12 )
__SCREAMING_SNAKE_CASE = tokenizer(sentences[1] ,return_tensors="""tf""" ).input_ids
__SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCamelCase ,max_new_tokens=12 )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase ,skip_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] ,skip_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
self.assertListEqual(lowerCamelCase ,[non_padded_sentence, padded_sentence] )
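# The batched test above depends on left padding: for a decoder-only LM, right
# padding would insert pad tokens between the prompt and the generated tokens,
# so per-sequence and batched generations would no longer match.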
| 13 |
"""Project Euler 26: find the divisor below the given limit whose unit fraction
1/d has the longest recurring cycle in its decimal expansion."""


def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
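# With the defaults (numerator=1, digit=1000) this returns 983, the published
# answer to Project Euler problem 26 (983 is a full reptend prime, so 1/983 has
# a 982-digit recurring cycle).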
| 13 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]
def __init__( self : str ,lowerCamelCase : int = 1 ,lowerCamelCase : int = 2_4000 ,lowerCamelCase : float = 0.0 ,lowerCamelCase : float = None ,lowerCamelCase : float = None ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(feature_size=lowerCamelCase ,sampling_rate=lowerCamelCase ,padding_value=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = chunk_length_s
__SCREAMING_SNAKE_CASE = overlap
@property
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Tuple ,lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCamelCase : Optional[Union[bool, str, PaddingStrategy]] = None ,lowerCamelCase : Optional[bool] = False ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[Union[str, TensorType]] = None ,lowerCamelCase : Optional[int] = None ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = bool(
isinstance(lowerCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(lowerCamelCase ):
if example.ndim > 2:
raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
__SCREAMING_SNAKE_CASE = """max_length"""
else:
__SCREAMING_SNAKE_CASE = input_values
# normal padding on batch
if padded_inputs is None:
__SCREAMING_SNAKE_CASE = self.pad(
lowerCamelCase ,max_length=lowerCamelCase ,truncation=lowerCamelCase ,padding=lowerCamelCase ,return_attention_mask=lowerCamelCase ,)
if padding:
__SCREAMING_SNAKE_CASE = padded_inputs.pop("""attention_mask""" )
__SCREAMING_SNAKE_CASE = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
__SCREAMING_SNAKE_CASE = example[..., None]
input_values.append(example.T )
__SCREAMING_SNAKE_CASE = input_values
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(lowerCamelCase )
return padded_inputs
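# Usage sketch (comment-only; upstream this class is EnCodec's feature extractor,
# and the values below are illustrative):
# import numpy as np
# fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000, chunk_length_s=1.0, overlap=0.01)
# audio = np.zeros(30000, dtype=np.float32)
# batch = fe(audio, sampling_rate=24000, return_tensors="np")
# batch["input_values"].shape  # (1, 1, padded_length)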
| 13 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __a ( unittest.TestCase ):
def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 3
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
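# These suites are normally collected by pytest, e.g. (the path is illustrative):
# pytest tests/models/chinese_clip/test_image_processing_chinese_clip.py -q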
| 13 | 1 |
"""Fetch reports from the last completed run of the scheduled (daily) CI."""
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed workflow run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unzip the artifacts, returning the text files they contain."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
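# Example (comment-only; the artifact name and env var are illustrative):
# reports = get_last_daily_ci_reports(
#     ["run_all_tests_gpu_test_reports"], output_dir=".", token=os.environ.get("GITHUB_TOKEN")
# )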
| 13 |
"""Helpers for benchmarking `datasets`: a timing decorator and dummy-data generation."""
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
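# Sketch of how these helpers combine (comment-only; the feature spec is illustrative):
# features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
# ds = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
# timed = get_duration(lambda d: sum(1 for _ in d))
# timed(ds)  # returns elapsed seconds, not the wrapped function's result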
| 13 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")

    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict( orig_state_dict , model , base_model=False ) -> dict:
    '''Rewrite the original checkpoint into the 🤗 Transformers layout, splitting fused qkv matrices.'''
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevit."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            # the checkpoint stores query/key/value as one fused matrix; slice it apart
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[:dim]
                orig_state_dict[prefix + """key.bias"""] = val[dim : dim * 2]
                orig_state_dict[prefix + """value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
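# Quick sanity sketch (illustrative, not part of the original script): the qkv branch
# above slices a fused (3*dim, hidden) matrix into three (dim, hidden) blocks.
#
#     import torch
#     fused = torch.arange(24).reshape(6, 4)  # pretend all_head_size == 2
#     q, k, v = fused[:2, :], fused[2:4, :], fused[-2:, :]
#     assert torch.equal(torch.cat([q, k, v]), fused)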
def prepare_img( ):
    '''Load a test image of two cats from COCO.'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ) -> None:
    '''Copy the original MobileViT weights into our structure and verify the outputs.'''
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(checkpoint , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]],
[[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]],
[[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]],
[[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]],
[[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]],
[[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]],
[[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]],
] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] )
        else:
            raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }
        print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
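# Example invocation of the conversion script above (script file name and paths are
# hypothetical):
#
#     python convert_mobilevit_original_to_pytorch.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit-small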
| 13 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url( monkeypatch ):
    '''simple docstring'''
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config( monkeypatch ):
    '''simple docstring'''
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , CI_HUB_ENDPOINT )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path( monkeypatch ):
    '''simple docstring'''
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token( ci_hub_config , ci_hub_token_path ):
    '''simple docstring'''
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def hf_api( ):
    '''simple docstring'''
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope="""session""" )
def hf_token( hf_api ):
    '''simple docstring'''
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo( hf_api ):
    '''simple docstring'''
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type="""dataset""" )
    return _cleanup_repo
@pytest.fixture
def temporary_repo( cleanup_repo ):
    '''simple docstring'''
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
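# Usage sketch for the `temporary_repo` fixture above (the test body is illustrative only):
#
#     def test_something(temporary_repo):
#         with temporary_repo(f"{CI_HUB_USER}/my-test-repo") as repo_id:
#             ...  # work with repo_id; the repo is deleted when the block exits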
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_txt_data_( hf_api , hf_token , text_file ):  # the data-file fixture name is assumed
    '''simple docstring'''
    repo_name = f"""repo_txt_data-{int(time.time() * 10e3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo="""data/text_data.txt""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data( hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hub_token_path ):
    '''simple docstring'''
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_zipped_txt_data_( hf_api , hf_token , zip_csv_with_dir_path ):  # the data-file fixture name is assumed
    '''simple docstring'''
    repo_name = f"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data( hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hub_token_path ):
    '''simple docstring'''
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def hf_private_dataset_repo_zipped_img_data_( hf_api , hf_token , zip_image_path ):  # the data-file fixture name is assumed
    '''simple docstring'''
    repo_name = f"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
    repo_id = f"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data( hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hub_token_path ):
    '''simple docstring'''
    return hf_private_dataset_repo_zipped_img_data_
| 13 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __a ( _snake_case ):
    model_type = 'megatron-bert'
    def __init__( self ,vocab_size=29056 ,hidden_size=1024 ,num_hidden_layers=24 ,num_attention_heads=16 ,intermediate_size=4096 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,position_embedding_type="absolute" ,use_cache=True ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
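# Minimal usage sketch (the config class above is bound to the placeholder name `__a`
# in this dump; with the real `MegatronBertConfig` name the call is identical):
#
#     config = __a(vocab_size=29056, hidden_size=1024, num_hidden_layers=24)
#     assert config.model_type == "megatron-bert"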
| 13 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__( self ,backbone_config=None ,num_queries=900 ,max_position_embeddings=2048 ,encoder_layers=6 ,encoder_ffn_dim=2048 ,encoder_attention_heads=8 ,decoder_layers=6 ,decoder_ffn_dim=1024 ,decoder_attention_heads=8 ,encoder_layerdrop=0.0 ,is_encoder_decoder=True ,activation_function="relu" ,d_model=256 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.02 ,init_xavier_std=1.0 ,return_intermediate=True ,auxiliary_loss=False ,position_embedding_type="sine" ,num_feature_levels=5 ,encoder_n_points=4 ,decoder_n_points=4 ,two_stage=True ,two_stage_num_proposals=300 ,with_box_refine=True ,assign_first_stage=True ,class_cost=1 ,bbox_cost=5 ,giou_cost=2 ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,eos_coefficient=0.1 ,focal_alpha=0.25 ,**kwargs ,):
        '''simple docstring'''
        if backbone_config is None:
            logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
            backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
        else:
            if isinstance(backbone_config ,dict ):
                backbone_model_type = backbone_config.pop("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder ,**kwargs )
    @property
    def num_attention_heads( self ):
        '''simple docstring'''
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        '''simple docstring'''
        return self.d_model
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
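# Serialization round-trip sketch for the DETA config above (class is bound to `__a` here):
#
#     config = __a()           # a default ResNet backbone config is created
#     d = config.to_dict()     # backbone_config is recursively serialized
#     assert d["model_type"] == "deta"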
| 13 | 1 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput( _snake_case ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
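# The "cosine" branch above implements the Nichol & Dhariwal cosine schedule:
#     alpha_bar(t) = cos^2( (t + 0.008) / 1.008 * pi / 2 )
#     beta_i = min( 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta )
# i.e. each beta is derived from the ratio of consecutive cumulative alpha products.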
class UnCLIPScheduler( _snake_case, _snake_case ):
    @register_to_config
    def __init__( self ,num_train_timesteps: int = 1000 ,variance_type: str = "fixed_small_log" ,clip_sample: bool = True ,clip_sample_range: Optional[float] = 1.0 ,prediction_type: str = "epsilon" ,beta_schedule: str = "squaredcos_cap_v2" ,):
        '''simple docstring'''
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas ,dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 ,num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self ,sample: torch.FloatTensor ,timestep: Optional[int] = None ):
        '''simple docstring'''
        return sample
    def set_timesteps( self ,num_inference_steps: int ,device: Union[str, torch.device] = None ):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 ,num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self ,t ,prev_timestep=None ,predicted_variance=None ,variance_type=None ):
        '''simple docstring'''
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance ,min=1e-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
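# The quantity computed above is the DDPM posterior variance (Ho et al. 2020, Eq. (7)):
#     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
# "fixed_small_log" returns exp(0.5 * log(clamped variance)), i.e. a standard deviation,
# while "learned_range" interpolates between log(beta_tilde_t) and log(beta_t) using the
# model-predicted fraction.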
    def step( self ,model_output: torch.FloatTensor ,timestep: int ,sample: torch.FloatTensor ,prev_timestep: Optional[int] = None ,generator=None ,return_dict: bool = True ,):
        '''simple docstring'''
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output ,sample.shape[1] ,dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                """ for the UnCLIPScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample ,-self.config.clip_sample_range ,self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape ,dtype=model_output.dtype ,generator=generator ,device=model_output.device )
            variance = self._get_variance(
                t ,predicted_variance=predicted_variance ,prev_timestep=prev_timestep ,)
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    """ for the UnCLIPScheduler.""" )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample ,pred_original_sample=pred_original_sample )
    def add_noise( self ,original_samples: torch.FloatTensor ,noise: torch.FloatTensor ,timesteps: torch.IntTensor ,):
        '''simple docstring'''
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
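# add_noise implements the closed-form forward process q(x_t | x_0):
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,  eps ~ N(0, I)
# The two while-loops only broadcast the per-timestep scalars up to the sample's rank.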
| 13 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class __a ( nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        self.conv = nn.Conv(
            self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ):
        '''simple docstring'''
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class __a ( nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        self.conv = nn.Conv(
            self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ):
        '''simple docstring'''
        hidden_states = self.conv(hidden_states )
        return hidden_states
class __a ( nn.Module ):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 )
        self.conv1 = nn.Conv(
            out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
        self.time_emb_proj = nn.Dense(out_channels ,dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 ,epsilon=1e-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,)
    def __call__( self ,hidden_states ,temb ,deterministic=True ):
        '''simple docstring'''
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb ,1 ) ,1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states ,deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
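# Smoke-test sketch for the resnet block above (all three Flax modules in this dump share
# the placeholder class name `__a`, so `block` refers to the last definition; shapes are
# illustrative):
#
#     import jax, jax.numpy as jnp
#     block = __a(in_channels=32, out_channels=64)
#     variables = block.init(jax.random.PRNGKey(0), jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128)))
#     out = block.apply(variables, jnp.ones((1, 8, 8, 32)), jnp.ones((1, 128)))  # (1, 8, 8, 64)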
| 13 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __a ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname ,IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map ,fp )
    def get_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        return CLIPTokenizer.from_pretrained(self.tmpdirname ,pad_token="""!""" ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,pad_token="""!""" ,**kwargs )
    def get_image_processor( self ,**kwargs ):
        '''simple docstring'''
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname ,**kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(self.tmpdirname ,use_fast=lowerCamelCase )
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase )
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ,return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() ,encoded_processor[key][0].tolist() )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """google/owlvit-base-patch32"""
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = ["""cat""", """nasa badge"""]
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 16
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape ,(2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """google/owlvit-base-patch32"""
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = [["""cat""", """nasa badge"""], ["""person"""]]
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = len(lowerCamelCase )
__SCREAMING_SNAKE_CASE = max([len(lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape ,(batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """google/owlvit-base-patch32"""
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = ["""cat""", """nasa badge"""]
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = inputs["""input_ids"""]
__SCREAMING_SNAKE_CASE = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape ,(2, seq_length) )
self.assertListEqual(list(input_ids[0] ) ,predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) ,predicted_ids[1] )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,query_images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
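# The tests above pin down the processor contract: text goes through the wrapped CLIP
# tokenizer and images through the OwlViT image processor. A minimal manual check along
# the same lines (model id taken from the tests above):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     enc = processor(text=[["cat", "nasa badge"], ["person"]], return_tensors="np")
#     assert enc["input_ids"].shape == (4, 16)  # 2 texts x max 2 queries, 16 tokens each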
| 13 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap :
    def __init__( self ):
        '''simple docstring'''
        self.node_position = []
    def get_position( self ,vertex ):
        '''simple docstring'''
        return self.node_position[vertex]
    def set_position( self ,vertex ,pos ):
        '''simple docstring'''
        self.node_position[vertex] = pos
    def top_to_bottom( self ,heap ,start ,size ,positions ):
        '''simple docstring'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] ,self.get_position(positions[start] ) )
                self.set_position(positions[start] ,temp )
                self.top_to_bottom(heap ,smallest_child ,size ,positions )
    def bottom_to_top( self ,val ,index ,heap ,position ):
        '''simple docstring'''
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] ,index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp ,index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp ,0 )
    def heapify( self ,heap ,positions ):
        '''simple docstring'''
        start = len(heap ) // 2 - 1
        for i in range(start ,-1 ,-1 ):
            self.top_to_bottom(heap ,i ,len(heap ) ,positions )
    def delete_minimum( self ,heap ,positions ):
        '''simple docstring'''
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap ,0 ,len(heap ) ,positions )
        return temp
def prisms_algorithm( adjacency_list ) -> list:
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
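# Worked example (illustrative): a triangle graph with edges 0-1 (w=1), 1-2 (w=2), 0-2 (w=3).
# Prim's algorithm grows the tree from vertex 0 and keeps the two cheapest edges:
#
#     example = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#         example[u].append([v, w])
#         example[v].append([u, w])
#     print(prisms_algorithm(example))  # expected: [(0, 1), (1, 2)]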
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 13 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
    def __init__( self ,parent ,batch_size=13 ,image_size=32 ,patch_size=2 ,num_channels=3 ,embed_dim=16 ,depths=[1, 2, 1] ,num_heads=[2, 2, 4] ,window_size=2 ,mlp_ratio=2.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,patch_norm=True ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,is_training=True ,scope=None ,use_labels=True ,type_sequence_label_size=10 ,encoder_stride=8 ,out_features=["stage1", "stage2", "stage3"] ,out_indices=[1, 2, 3] ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        '''simple docstring'''
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self ,config ,pixel_values ,labels ):
        '''simple docstring'''
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,[16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __a ( _snake_case, _snake_case, unittest.TestCase ):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=MaskFormerSwinConfig ,embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase ,nn.Linear ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
    def check_hidden_states_output( self ,inputs_dict ,config ,model_class ,image_size ):
        '''simple docstring'''
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) ,expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__SCREAMING_SNAKE_CASE = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__SCREAMING_SNAKE_CASE = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,(padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCamelCase : Union[str, Any] ):
__SCREAMING_SNAKE_CASE = 0
return t
def check_equivalence(lowerCamelCase : Any ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : str={} ):
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**lowerCamelCase ,return_dict=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(**lowerCamelCase ,return_dict=lowerCamelCase ,**lowerCamelCase ).to_tuple()
def recursive_check(lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any] ):
if isinstance(lowerCamelCase ,(List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCamelCase ,lowerCamelCase ):
recursive_check(lowerCamelCase ,lowerCamelCase )
elif isinstance(lowerCamelCase ,lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() ,dict_object.values() ):
recursive_check(lowerCamelCase ,lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowerCamelCase ) ,set_nan_tensor_to_zero(lowerCamelCase ) ,atol=1E-5 ) ,msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(lowerCamelCase ).any()} and `inf`: {torch.isinf(lowerCamelCase )}. Dict has"""
f""" `nan`: {torch.isnan(lowerCamelCase ).any()} and `inf`: {torch.isinf(lowerCamelCase )}."""
) ,)
recursive_check(lowerCamelCase ,lowerCamelCase )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
check_equivalence(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
check_equivalence(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,{"""output_hidden_states""": True} )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,{"""output_hidden_states""": True} )
@require_torch
class __a ( unittest.TestCase, _snake_case ):
__UpperCamelCase : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__UpperCamelCase : Any = MaskFormerSwinConfig
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskFormerSwinModelTester(self )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = backbone_class(lowerCamelCase )
backbone.to(lowerCamelCase )
backbone.eval()
__SCREAMING_SNAKE_CASE = backbone(**lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps ,lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ):
                self.assertEqual(feature_map.shape[:2] ,(batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__SCREAMING_SNAKE_CASE = backbone(**lowerCamelCase ,output_hidden_states=lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) ,len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) ,(batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__SCREAMING_SNAKE_CASE = backbone(**lowerCamelCase ,output_attentions=lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
| 13 |
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE = cha[1]
else:
__SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 13 | 1 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""-m""" , """--pretrained_model_name_or_path""" , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
parser.add_argument(
"""-c""" , """--caption""" , type=__UpperCAmelCase , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
parser.add_argument(
"""-n""" , """--images_num""" , type=__UpperCAmelCase , default=4 , help="""How much images to generate.""" , )
parser.add_argument(
"""-s""" , """--seed""" , type=__UpperCAmelCase , default=42 , help="""Seed for random process.""" , )
parser.add_argument(
"""-ci""" , """--cuda_id""" , type=__UpperCAmelCase , default=0 , help="""cuda_id.""" , )
__SCREAMING_SNAKE_CASE = parser.parse_args()
return args
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if not len(__UpperCAmelCase ) == rows * cols:
raise ValueError("""The specified number of rows and columns are not correct.""" )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = imgs[0].size
__SCREAMING_SNAKE_CASE = Image.new("""RGB""" , size=(cols * w, rows * h) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = grid.size
for i, img in enumerate(__UpperCAmelCase ):
grid.paste(__UpperCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase="robotic cat with wings" , __UpperCAmelCase=7.5 , __UpperCAmelCase=50 , __UpperCAmelCase=1 , __UpperCAmelCase=42 , ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.Generator(pipeline.device ).manual_seed(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = pipeline(
__UpperCAmelCase , guidance_scale=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , ).images
__SCREAMING_SNAKE_CASE = int(math.sqrt(__UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = image_grid(__UpperCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
a = parse_args()
# Load models and create wrapper for stable diffusion
a = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
a = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
a = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
a = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
a = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
a = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
a = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, "unet", unet)
else:
a = unet.to(torch.device("cuda", args.cuda_id))
a = pipeline.to(unet.device)
a , a = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
a = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 13 |
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
__SCREAMING_SNAKE_CASE = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE = midpoint - 1
else:
__SCREAMING_SNAKE_CASE = midpoint + 1
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
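# Usage sketch (added for illustration): the stdlib bisect module mirrors the
# bisect_left / bisect_right semantics implemented above on a sorted list.
import bisect as _demo_bisect
_demo_sorted = [1, 3, 3, 5, 8]
assert _demo_bisect.bisect_left(_demo_sorted, 3) == 1   # first slot where 3 fits
assert _demo_bisect.bisect_right(_demo_sorted, 3) == 3  # slot just past the last 3
_demo_bisect.insort_left(_demo_sorted, 3)               # insertion keeps the list sorted
assert _demo_sorted == [1, 3, 3, 3, 5, 8]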
| 13 | 1 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Mock()
__SCREAMING_SNAKE_CASE = conn, Mock()
__SCREAMING_SNAKE_CASE = iter([1, None] )
__SCREAMING_SNAKE_CASE = lambda __UpperCAmelCase : next(__UpperCAmelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=__UpperCAmelCase )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
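# Minimal sketch of the patch-and-assert pattern used above (illustrative only;
# _demo_greet is a hypothetical function under test, not part of file_transfer).
from unittest.mock import patch as _demo_patch
def _demo_greet():
    print("hello")
def _demo_test_greet():
    # Inside the context manager, lookups of builtins.print resolve to the mock,
    # so the call can be asserted instead of actually printing.
    with _demo_patch("builtins.print") as fake_print:
        _demo_greet()
        fake_print.assert_called_once_with("hello")
_demo_test_greet()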
| 13 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : int = 'linear'
__UpperCamelCase : Tuple = 'cosine'
__UpperCamelCase : Tuple = 'cosine_with_restarts'
__UpperCamelCase : List[Any] = 'polynomial'
__UpperCamelCase : Optional[Any] = 'constant'
__UpperCamelCase : Optional[int] = 'constant_with_warmup'
__UpperCamelCase : List[Any] = 'piecewise_constant'
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
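# Worked example (illustrative, pure Python): the multiplier the linear schedule
# above produces for num_warmup_steps=2 and num_training_steps=10. The helper
# name _demo_linear_multiplier is hypothetical.
def _demo_linear_multiplier(step, warmup=2, total=10):
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))
# Ramps 0.0 -> 1.0 during warmup, then decays linearly to 0.0 at the last step.
assert [_demo_linear_multiplier(s) for s in (0, 1, 2, 6, 10)] == [0.0, 0.5, 1.0, 0.5, 0.0]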
| 13 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class __a ( _snake_case ):
def __init__( self : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase ,scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self : List[Any] ,lowerCamelCase : int = 1 ,lowerCamelCase : int = 100 ,lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase : Optional[float] = None ,lowerCamelCase : bool = True ,):
'''simple docstring'''
if audio_length_in_s is None:
__SCREAMING_SNAKE_CASE = self.unet.config.sample_size / self.unet.config.sample_rate
__SCREAMING_SNAKE_CASE = audio_length_in_s * self.unet.config.sample_rate
__SCREAMING_SNAKE_CASE = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
__SCREAMING_SNAKE_CASE = int(lowerCamelCase )
if sample_size % down_scale_factor != 0:
__SCREAMING_SNAKE_CASE = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
""" process.""" )
__SCREAMING_SNAKE_CASE = int(lowerCamelCase )
__SCREAMING_SNAKE_CASE = next(iter(self.unet.parameters() ) ).dtype
__SCREAMING_SNAKE_CASE = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase ,lowerCamelCase ) and len(lowerCamelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowerCamelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__SCREAMING_SNAKE_CASE = randn_tensor(lowerCamelCase ,generator=lowerCamelCase ,device=self.device ,dtype=lowerCamelCase )
# set step values
self.scheduler.set_timesteps(lowerCamelCase ,device=audio.device )
__SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__SCREAMING_SNAKE_CASE = self.unet(lowerCamelCase ,lowerCamelCase ).sample
# 2. compute previous image: x_t -> t_t-1
__SCREAMING_SNAKE_CASE = self.scheduler.step(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ).prev_sample
__SCREAMING_SNAKE_CASE = audio.clamp(-1 ,1 ).float().cpu().numpy()
__SCREAMING_SNAKE_CASE = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase )
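# Worked example (illustrative): the padding arithmetic above rounds the
# requested sample count up to a multiple of 2 ** len(up_blocks) so every
# up/downsampling stage divides evenly. All numbers below are hypothetical.
_demo_sample_rate = 16000
_demo_audio_s = 1.0003
_demo_down_scale = 2 ** 3  # e.g. three up blocks -> factor 8
_demo_requested = int(_demo_audio_s * _demo_sample_rate)  # 16004 samples
_demo_padded = int((_demo_audio_s * _demo_sample_rate) // _demo_down_scale + 1) * _demo_down_scale
assert _demo_padded % _demo_down_scale == 0 and _demo_padded >= _demo_requested  # 16008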
| 13 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
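# Illustrative sketch of the deferred-import idea behind _LazyModule: a
# module-level __getattr__ (PEP 562) can postpone the real import until first
# attribute access. The structure below is hypothetical and uses json only as
# a stand-in submodule.
import importlib
def _demo_lazy_getattr(name: str):
    _demo_structure = {"json": ["dumps", "loads"]}
    for module_name, symbols in _demo_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)
assert _demo_lazy_getattr("dumps")({"a": 1}) == '{"a": 1}'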
| 13 | 1 |
'''simple docstring'''
import re
def __magic_name__ ( __UpperCAmelCase ) -> str:
'''simple docstring'''
if len(re.findall("""[ATCG]""" , __UpperCAmelCase ) ) != len(__UpperCAmelCase ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
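# Usage sketch: str.maketrans/str.translate give the same single-pass
# complement computed above.
assert "ATCG".translate(str.maketrans("ATCG", "TAGC")) == "TAGC"
assert "GATTACA".translate(str.maketrans("ATCG", "TAGC")) == "CTAATGT"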
| 13 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
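# Illustrative sketch: the same find(..., class_=...) lookup on a static HTML
# snippet so it runs without network access. Requires the beautifulsoup4
# package (canonical import name bs4); the markup is made up and does not
# mirror Yahoo's real page structure.
from bs4 import BeautifulSoup as _DemoSoup
_demo_html = '<div class="My(6px)"><span>123.45</span></div>'
_demo_soup = _DemoSoup(_demo_html, "html.parser")
assert _demo_soup.find("div", class_="My(6px)").find("span").text == "123.45"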
| 13 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __a ( metaclass=_snake_case ):
__UpperCamelCase : List[str] = ['torch', 'torchsde']
def __init__( self : Optional[Any] ,*lowerCamelCase : Optional[int] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple ,*lowerCamelCase : Optional[Any] ,**lowerCamelCase : Tuple ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] ,*lowerCamelCase : Union[str, Any] ,**lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 13 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
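# Worked trace (illustrative): reversing 121 digit by digit gives
# rev = 0 -> 0*10+1 = 1 -> 1*10+2 = 12 -> 12*10+1 = 121, equal to the input.
def _demo_reverse_digits(n: int) -> int:
    rev = 0
    while n > 0:
        rev = rev * 10 + n % 10
        n //= 10
    return rev
assert _demo_reverse_digits(121) == 121  # palindrome
assert _demo_reverse_digits(123) == 321  # not a palindrome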
| 13 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if train_file is not None:
__SCREAMING_SNAKE_CASE = [train_file]
if eval_file is not None:
__SCREAMING_SNAKE_CASE = [eval_file]
if test_file is not None:
__SCREAMING_SNAKE_CASE = [test_file]
__SCREAMING_SNAKE_CASE = datasets.load_dataset("""csv""" , data_files=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = list(ds[list(files.keys() )[0]].features.keys() )
__SCREAMING_SNAKE_CASE = features_name.pop(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = list(set(ds[list(files.keys() )[0]][label_name] ) )
__SCREAMING_SNAKE_CASE = {label: i for i, label in enumerate(__UpperCAmelCase )}
__SCREAMING_SNAKE_CASE = tokenizer.model_input_names
__SCREAMING_SNAKE_CASE = {}
if len(__UpperCAmelCase ) == 1:
for k in files.keys():
__SCREAMING_SNAKE_CASE = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding="""max_length""" ) , batched=__UpperCAmelCase , )
elif len(__UpperCAmelCase ) == 2:
for k in files.keys():
__SCREAMING_SNAKE_CASE = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding="""max_length""" , ) , batched=__UpperCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__SCREAMING_SNAKE_CASE = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__SCREAMING_SNAKE_CASE = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__SCREAMING_SNAKE_CASE = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE = labelaid[ex[label_name]]
yield (d, label)
__SCREAMING_SNAKE_CASE = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__SCREAMING_SNAKE_CASE = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__SCREAMING_SNAKE_CASE = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__SCREAMING_SNAKE_CASE = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__SCREAMING_SNAKE_CASE = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__SCREAMING_SNAKE_CASE = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
a = logging.getLogger(__name__)
@dataclass
class __a :
__UpperCamelCase : int = field(metadata={'help': 'Which column contains the label'} )
__UpperCamelCase : str = field(default=_snake_case, metadata={'help': 'The path of the training file'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'The path of the development file'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'The path of the test file'} )
__UpperCamelCase : int = field(
default=128, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class __a :
__UpperCamelCase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__UpperCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__UpperCAmelCase ) , labelaid=__UpperCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(__UpperCAmelCase ) -> Dict:
__SCREAMING_SNAKE_CASE = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = TFTrainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate()
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(__UpperCAmelCase )
return results
if __name__ == "__main__":
main()
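# Illustrative sketch of the label-to-id mapping built inside get_tfds above:
# the unique labels in the data become contiguous integer ids. sorted() is used
# here for a reproducible order; the script itself enumerates raw set order.
_demo_labels = ["neg", "pos", "neg", "neutral"]
_demo_label2id = {label: i for i, label in enumerate(sorted(set(_demo_labels)))}
assert _demo_label2id == {"neg": 0, "neutral": 1, "pos": 2}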
| 13 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a = list[list[float | int]]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for row in range(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = matrix[row][col]
__SCREAMING_SNAKE_CASE = vector[row][0]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
        __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
__SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCAmelCase ):
for row in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase )
]
def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for x_val, y_val in enumerate(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
__SCREAMING_SNAKE_CASE = y_val
__SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase )
def interpolated_func(__UpperCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__UpperCAmelCase ) )
return interpolated_func
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )]
__SCREAMING_SNAKE_CASE = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for poly in polynomials:
__SCREAMING_SNAKE_CASE = 1
while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ):
x_val += 1
ret += poly(__UpperCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
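# Worked example (illustrative): the first-incorrect-term (FIT) idea on the
# tiny generator u(n) = n**3. The degree-1 fit through (1, 1) and (2, 8) is
# 7*n - 6; it first disagrees at n = 3, so that fit contributes FIT = 15.
def _demo_u(n: int) -> int:
    return n**3
def _demo_line(n: int) -> int:
    return 7 * n - 6
_demo_n = 1
while _demo_u(_demo_n) == _demo_line(_demo_n):
    _demo_n += 1
assert (_demo_n, _demo_line(_demo_n)) == (3, 15)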
| 13 | 1 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a = "__DUMMY_TRANSFORMERS_USER__"
a = "Dummy User"
a = "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"
a = "https://hub-ci.huggingface.co"
a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
a = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(__UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=__UpperCAmelCase )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfFolder.get_token()
HfFolder.save_token(__UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 13 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __a ( _snake_case ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
        __SCREAMING_SNAKE_CASE = [hypothesis_template.format(x ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda x : -x[0] )
]
return result
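# Worked example (illustrative, pure Python): the softmax-then-sort step done
# in postprocess above, applied to made-up image-text logits.
import math as _demo_math
_demo_logits = {"cat": 2.0, "dog": 1.0, "car": 0.0}
_demo_denom = sum(_demo_math.exp(v) for v in _demo_logits.values())
_demo_result = sorted(
    ({"score": _demo_math.exp(v) / _demo_denom, "label": k} for k, v in _demo_logits.items()),
    key=lambda d: -d["score"],
)
assert [d["label"] for d in _demo_result] == ["cat", "dog", "car"]
assert abs(sum(d["score"] for d in _demo_result) - 1.0) < 1e-9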
| 13 | 1 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = GPTSwaTokenizer
__UpperCamelCase : List[str] = False
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[str] = False
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = GPTSwaTokenizer(lowerCamelCase ,eos_token="""<unk>""" ,bos_token="""<unk>""" ,pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """This is a test"""
__SCREAMING_SNAKE_CASE = """This is a test"""
return input_text, output_text
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """<s>"""
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) ,lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(lowerCamelCase ) ,2000 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,2000 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = GPTSwaTokenizer(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[465, 287, 265, 631, 842] )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
lowerCamelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] ,)
# fmt: on
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCamelCase )
# fmt: off
self.assertListEqual(
lowerCamelCase ,["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = GPTSwaTokenizer(lowerCamelCase )
__SCREAMING_SNAKE_CASE = ["""This is a test""", """I was born in 92000, and this is falsé."""]
__SCREAMING_SNAKE_CASE = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(lowerCamelCase ,lowerCamelCase ):
self.assertListEqual(tokenizer.encode_fast(lowerCamelCase ) ,lowerCamelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(tokenizer.decode_fast(lowerCamelCase ) ,lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase ,model_name="""AI-Sweden/gpt-sw3-126m""" ,sequences=lowerCamelCase ,)
| 13 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    '''Raise if the installed version of `pkg` does not satisfy the pinned requirement in `deps`.'''
    require_version(deps[pkg], hint)
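# Hedged usage sketch (illustrative, not part of the original module): integrations
# typically call the helper above before importing an optional package, e.g.
#
#     dep_version_check("numpy")  # raises (ImportError in current releases) if numpy is outside the pinned range
#
# The package name is an assumption for illustration; any key of `deps` behaves the same way.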
| 13 | 1 |
'''simple docstring'''
import torch
def main() -> None:
    '''Print how many CUDA devices are visible to this process.'''
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 13 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class __a :
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
__UpperCamelCase : int = field(
default=1024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__UpperCamelCase : str = field(
default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
__UpperCamelCase : str = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
__SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__SCREAMING_SNAKE_CASE = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
__SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
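    # For reference (assumed from the parsing above): a raw `table_text` field is
    # '#'-delimited columns with newline-delimited rows, e.g.
    #     "name#age\nalice#34\nbob#29"
    # which `_convert_table_text_to_pandas` turns into a DataFrame with header
    # ["name", "age"], so the TAPEX tokenizer can linearize it with the statement.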
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 13 | 1 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    '''Return the sum of the decimal digits of `num`.'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    '''Sum the digits of the numerator of the `max_n`-th convergent of the continued fraction for e (Project Euler 65).'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13 | 1 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.inf
def set_batch_size(__UpperCAmelCase ) -> None:
nonlocal batch_size
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and feature.dtype == "binary":
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCAmelCase , __UpperCAmelCase )
return None if batch_size is np.inf else batch_size
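# Illustrative note (assumption, not from the original module): for
# Features({"img": Image(), "label": Value("string")}) the `_visit` walk above hits
# the Image feature and caps the writer batch size at
# config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS, keeping Parquet row groups small
# enough to randomly access rows with large binary payloads; a schema with only
# scalar features returns None, so the writer keeps its default batch size.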
class __a ( _snake_case ):
def __init__( self : Tuple ,lowerCamelCase : NestedDataStructureLike[PathLike] ,lowerCamelCase : Optional[NamedSplit] = None ,lowerCamelCase : Optional[Features] = None ,lowerCamelCase : str = None ,lowerCamelCase : bool = False ,lowerCamelCase : bool = False ,lowerCamelCase : Optional[int] = None ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(
lowerCamelCase ,split=lowerCamelCase ,features=lowerCamelCase ,cache_dir=lowerCamelCase ,keep_in_memory=lowerCamelCase ,streaming=lowerCamelCase ,num_proc=lowerCamelCase ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = path_or_paths if isinstance(lowerCamelCase ,lowerCamelCase ) else {self.split: path_or_paths}
__SCREAMING_SNAKE_CASE = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
__SCREAMING_SNAKE_CASE = Parquet(
cache_dir=lowerCamelCase ,data_files=lowerCamelCase ,features=lowerCamelCase ,hash=lowerCamelCase ,**lowerCamelCase ,)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
if self.streaming:
__SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=lowerCamelCase ,download_mode=lowerCamelCase ,verification_mode=lowerCamelCase ,base_path=lowerCamelCase ,num_proc=self.num_proc ,)
__SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split=self.split ,verification_mode=lowerCamelCase ,in_memory=self.keep_in_memory )
return dataset
class __a :
def __init__( self : List[Any] ,lowerCamelCase : Dataset ,lowerCamelCase : Union[PathLike, BinaryIO] ,lowerCamelCase : Optional[int] = None ,**lowerCamelCase : List[Any] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = dataset
__SCREAMING_SNAKE_CASE = path_or_buf
__SCREAMING_SNAKE_CASE = batch_size or get_writer_batch_size(dataset.features )
__SCREAMING_SNAKE_CASE = parquet_writer_kwargs
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with open(self.path_or_buf ,"""wb+""" ) as buffer:
__SCREAMING_SNAKE_CASE = self._write(file_obj=lowerCamelCase ,batch_size=lowerCamelCase ,**self.parquet_writer_kwargs )
else:
__SCREAMING_SNAKE_CASE = self._write(file_obj=self.path_or_buf ,batch_size=lowerCamelCase ,**self.parquet_writer_kwargs )
return written
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : BinaryIO ,lowerCamelCase : int ,**lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = parquet_writer_kwargs.pop("""path_or_buf""" ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.dataset.features.arrow_schema
__SCREAMING_SNAKE_CASE = pq.ParquetWriter(lowerCamelCase ,schema=lowerCamelCase ,**lowerCamelCase )
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,lowerCamelCase ) ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating parquet from Arrow format""" ,):
__SCREAMING_SNAKE_CASE = query_table(
table=self.dataset._data ,key=slice(lowerCamelCase ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(lowerCamelCase )
written += batch.nbytes
writer.close()
return written
| 13 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    '''Return the citation count shown on a Google Scholar lookup page.'''
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    '''Check primality by trial division, using the fact that every prime greater than 3 has the form 6k +/- 1.'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining prime candidates have the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
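# Quick sanity checks for the 6k +/- 1 trial division above (illustrative):
# is_prime(2) -> True, is_prime(3) -> True, is_prime(25) -> False (divisible by i = 5),
# is_prime(49) -> False (caught by i + 2 = 7 at i = 5), is_prime(29) -> True.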
def solution(nth: int = 10001) -> int:
    '''Return the `nth` prime number (Project Euler 7).'''
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'camembert'
def __init__( self : int ,lowerCamelCase : List[Any]=3_0522 ,lowerCamelCase : List[Any]=768 ,lowerCamelCase : str=12 ,lowerCamelCase : List[str]=12 ,lowerCamelCase : Optional[Any]=3072 ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=512 ,lowerCamelCase : Dict=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : List[Any]=1E-1_2 ,lowerCamelCase : Union[str, Any]=1 ,lowerCamelCase : Optional[Any]=0 ,lowerCamelCase : List[Any]=2 ,lowerCamelCase : List[str]="absolute" ,lowerCamelCase : int=True ,lowerCamelCase : Any=None ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class __a ( _snake_case ):
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 13 | 1 |
'''simple docstring'''
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version) -> None:
    '''Point `stableVersion` at the new release and add it to the version picker in custom.js.'''
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end of the mapping
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
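# Illustrative before/after (the shape of custom.js is assumed from the patterns matched above):
#   const stableVersion = "v4.16.0"   ->   const stableVersion = "v4.17.0"
#   const versionMapping = {
#       ...
#       "v4.16.0": "v4.16.0",         ->   gains a trailing "v4.17.0": "v4.17.0",
#   }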
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 13 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __a ( unittest.TestCase ):
def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ):
return model(pixel_values=lowerCamelCase ,**lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase )
| 13 | 1 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    '''
    Find the denominator up to `digit` whose unit fraction has the longest
    recurring cycle in its decimal expansion (Project Euler 26).

    >>> solution(1, 10)
    7
    '''
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'sew'
def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
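    # Illustrative note (assumption): with the default conv_stride
    # (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product above is 5 * 2**6 = 320,
    # i.e. one encoder frame per 320 raw samples (20 ms of audio at 16 kHz).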
| 13 | 1 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __a ( _snake_case ):
@slow
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("""bert-base-uncased""" )
__SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size
__SCREAMING_SNAKE_CASE = tokenizer.sep_token_id
__SCREAMING_SNAKE_CASE = tokenizer.cls_token_id
__SCREAMING_SNAKE_CASE = 128
__SCREAMING_SNAKE_CASE = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
__SCREAMING_SNAKE_CASE = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
__SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) )
__SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) )
__SCREAMING_SNAKE_CASE = 4
def _map_to_encoder_decoder_inputs(lowerCamelCase : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__SCREAMING_SNAKE_CASE = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=lowerCamelCase ,max_length=512 )
__SCREAMING_SNAKE_CASE = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=lowerCamelCase ,max_length=128 )
__SCREAMING_SNAKE_CASE = inputs.input_ids
__SCREAMING_SNAKE_CASE = inputs.attention_mask
__SCREAMING_SNAKE_CASE = outputs.input_ids
__SCREAMING_SNAKE_CASE = outputs.input_ids.copy()
__SCREAMING_SNAKE_CASE = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
__SCREAMING_SNAKE_CASE = outputs.attention_mask
assert all(len(lowerCamelCase ) == 512 for x in inputs.input_ids )
assert all(len(lowerCamelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCamelCase : Tuple ):
__SCREAMING_SNAKE_CASE = pred.label_ids
__SCREAMING_SNAKE_CASE = pred.predictions
# all unnecessary tokens are removed
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase ,skip_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase ,skip_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCamelCase ) )] ) / len(lowerCamelCase )
return {"accuracy": accuracy}
# map train dataset
__SCREAMING_SNAKE_CASE = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=lowerCamelCase ,batch_size=lowerCamelCase ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
__SCREAMING_SNAKE_CASE = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=lowerCamelCase ,batch_size=lowerCamelCase ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
__SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
__SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments(
output_dir=lowerCamelCase ,per_device_train_batch_size=lowerCamelCase ,per_device_eval_batch_size=lowerCamelCase ,predict_with_generate=lowerCamelCase ,evaluation_strategy="""steps""" ,do_train=lowerCamelCase ,do_eval=lowerCamelCase ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
__SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=lowerCamelCase ,args=lowerCamelCase ,compute_metrics=_compute_metrics ,train_dataset=lowerCamelCase ,eval_dataset=lowerCamelCase ,tokenizer=lowerCamelCase ,)
# start training
trainer.train()
| 13 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    '''
    Find the denominator up to `digit` whose unit fraction has the longest
    recurring cycle in its decimal expansion (Project Euler 26).

    >>> solution(1, 10)
    7
    '''
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_lock_timeout(tmpdir):
    '''An already-held lock must raise Timeout once the requested delay has elapsed.'''
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_lock_filename(tmpdir):
    '''Overlong lock file names must be shortened to stay within the OS filename limit.'''
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 13 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __a ( unittest.TestCase ):
def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ):
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) )
else:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 )
image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
if torchify:
__SCREAMING_SNAKE_CASE = [torch.from_numpy(lowerCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 3
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 13 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __a ( metaclass=_snake_case ):
__UpperCamelCase : Tuple = ['note_seq']
def __init__( self : Optional[Any] ,*lowerCamelCase : Tuple ,**lowerCamelCase : Any ):
'''simple docstring'''
requires_backends(self ,["""note_seq"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict ,*lowerCamelCase : Tuple ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls ,["""note_seq"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] ,*lowerCamelCase : Optional[Any] ,**lowerCamelCase : str ):
'''simple docstring'''
requires_backends(cls ,["""note_seq"""] )
| 13 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
def wrapper(*__UpperCAmelCase , **__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = timeit.default_timer()
__SCREAMING_SNAKE_CASE = func(*__UpperCAmelCase , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = timeit.default_timer() - starttime
return delta
__SCREAMING_SNAKE_CASE = func.__name__
return wrapper
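# Generate num_examples random rows matching the given datasets Features spec (arrays, scalar values, sequences).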
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = seq_shapes or {}
for i in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__UpperCAmelCase , _ArrayXD ):
__SCREAMING_SNAKE_CASE = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__UpperCAmelCase , datasets.Value ):
if v.dtype == "string":
__SCREAMING_SNAKE_CASE = """The small grey turtle was surprisingly fast when challenged."""
else:
__SCREAMING_SNAKE_CASE = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__UpperCAmelCase , datasets.Sequence ):
while isinstance(__UpperCAmelCase , datasets.Sequence ):
__SCREAMING_SNAKE_CASE = v.feature
__SCREAMING_SNAKE_CASE = seq_shapes[k]
__SCREAMING_SNAKE_CASE = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
__SCREAMING_SNAKE_CASE = data
dummy_data.append((i, example) )
return dummy_data
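# Stream the generated examples through an ArrowWriter, then reload the written Arrow file as a Dataset.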
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )
with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
for key, record in dummy_data:
__SCREAMING_SNAKE_CASE = features.encode_example(__UpperCAmelCase )
writer.write(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = writer.finalize()
    if num_final_examples != num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
__SCREAMING_SNAKE_CASE = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )
return dataset
| 13 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = BlipImageProcessor()
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
__SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).tokenizer
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).image_processor
def UpperCAmelCase__ ( self : List[Any] ,**lowerCamelCase : Dict ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).qformer_tokenizer
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
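        # Build a list containing one random RGB PIL image for the processor tests.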
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,)
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE = InstructBlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
self.assertIsInstance(processor.qformer_tokenizer ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ,return_token_type_ids=lowerCamelCase )
__SCREAMING_SNAKE_CASE = qformer_tokenizer(lowerCamelCase ,return_token_type_ids=lowerCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor["""qformer_""" + key] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] ,)
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] ,)
| 13 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a = "__DUMMY_TRANSFORMERS_USER__"
a = "Dummy User"
a = "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"
a = "https://hub-ci.huggingface.co"
a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
a = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(__UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=__UpperCAmelCase )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfFolder.get_token()
HfFolder.save_token(__UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__UpperCAmelCase )
return _temporary_repo
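# Session-scoped fixture: create a private text-data repo on the CI hub and delete it once the session ends.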
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 13 | 1 |
'''simple docstring'''
a = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_9344,
"knot": 1.852,
}
a = {
"km/h": 1.0,
"m/s": 0.2_7777_7778,
"mph": 0.6_2137_1192,
"knot": 0.5_3995_6803,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
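    # Convert by pivoting through km/h: unit_from -> km/h -> unit_to.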
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__SCREAMING_SNAKE_CASE = (
f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
f"""Valid values are: {', '.join(__UpperCAmelCase )}"""
)
raise ValueError(__UpperCAmelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 13 | 1 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
a = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __a ( _snake_case ):
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Whether to use SortishSampler or not.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
}, )
__UpperCamelCase : Optional[Union[str, Path, GenerationConfig]] = field(
default=_snake_case, metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
}, )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = v.to_dict()
return d
| 13 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
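        # 3x3 convolution applied after the 2x nearest-neighbor upsampling in __call__.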
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape
__SCREAMING_SNAKE_CASE = jax.image.resize(
lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,)
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int = None
__UpperCamelCase : float = 0.0
__UpperCamelCase : bool = None
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype )
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
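        # Use a 1x1 projection on the skip connection when the channel count changes (or when explicitly requested).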
__SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__SCREAMING_SNAKE_CASE = None
if use_nin_shortcut:
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hidden_states
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 )
__SCREAMING_SNAKE_CASE = hidden_states + temb
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
if self.conv_shortcut is not None:
__SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase )
return hidden_states + residual
| 13 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
a = 256047
a = 256145
@require_sentencepiece
@require_tokenizers
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : List[str] = NllbTokenizer
__UpperCamelCase : Union[str, Any] = NllbTokenizerFast
__UpperCamelCase : Union[str, Any] = True
__UpperCamelCase : Dict = True
__UpperCamelCase : Optional[Any] = {}
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = NllbTokenizer(lowerCamelCase ,keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NllbTokenizer(lowerCamelCase ,keep_accents=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCamelCase ,lowerCamelCase )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=True
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase ,legacy_format=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase )
                # Checks it saves with the same files
self.assertSequenceEqual(lowerCamelCase ,lowerCamelCase )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=False
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase ,legacy_format=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
@require_torch
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
if not self.test_seqaseq:
return
__SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Longer text that will definitely require truncation.
__SCREAMING_SNAKE_CASE = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
__SCREAMING_SNAKE_CASE = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
__SCREAMING_SNAKE_CASE = tokenizer.prepare_seqaseq_batch(
src_texts=lowerCamelCase ,tgt_texts=lowerCamelCase ,max_length=3 ,max_target_length=10 ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""ron_Latn""" ,)
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,10 )
# max_target_length will default to max_length if not specified
__SCREAMING_SNAKE_CASE = tokenizer.prepare_seqaseq_batch(
lowerCamelCase ,tgt_texts=lowerCamelCase ,max_length=3 ,return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.labels.shape[1] ,3 )
__SCREAMING_SNAKE_CASE = tokenizer.prepare_seqaseq_batch(
src_texts=lowerCamelCase ,max_length=3 ,max_target_length=10 ,return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 )
self.assertNotIn("""decoder_input_ids""" ,lowerCamelCase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = [AddedToken("""<special>""" ,lstrip=lowerCamelCase )]
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase ,additional_special_tokens=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.encode("""Hey this is a <special> token""" )
__SCREAMING_SNAKE_CASE = tokenizer_r.encode("""<special>""" ,add_special_tokens=lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase ,additional_special_tokens=lowerCamelCase ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(
lowerCamelCase ,additional_special_tokens=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.encode("""Hey this is a <special> token""" )
__SCREAMING_SNAKE_CASE = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __a ( unittest.TestCase ):
__UpperCamelCase : Tuple = 'facebook/nllb-200-distilled-600M'
__UpperCamelCase : Union[str, Any] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
__UpperCamelCase : List[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
__UpperCamelCase : Dict = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
@classmethod
def UpperCAmelCase__ ( cls : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NllbTokenizer.from_pretrained(
cls.checkpoint_name ,src_lang="""eng_Latn""" ,tgt_lang="""ron_Latn""" )
__SCREAMING_SNAKE_CASE = 1
return cls
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] ,25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] ,25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] ,25_6057 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.assertIn(lowerCamelCase ,self.tokenizer.all_special_ids )
# fmt: off
__SCREAMING_SNAKE_CASE = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(lowerCamelCase ,skip_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,max_length=lowerCamelCase ,truncation=lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1] ,2 )
self.assertEqual(ids[0] ,lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[25_6203, 3] )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = NllbTokenizer.from_pretrained(lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase )
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
__SCREAMING_SNAKE_CASE = shift_tokens_right(
batch["""labels"""] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
self.assertEqual((2, 15) ,batch.input_ids.shape )
self.assertEqual((2, 15) ,batch.attention_mask.shape )
__SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase )
self.assertEqual(lowerCamelCase ,batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=3 ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = self.tokenizer(
text_target=self.tgt_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=10 ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = targets["""input_ids"""]
__SCREAMING_SNAKE_CASE = shift_tokens_right(
lowerCamelCase ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,)
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,{
                # eng_Latn, A, test, EOS
"""input_ids""": [[25_6047, 70, 7356, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
                # fra_Latn
"""forced_bos_token_id""": 25_6057,
} ,)
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids ,[1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids ,[25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 13 |
'''simple docstring'''
import sys
from collections import defaultdict
class __a :
def __init__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.node_position[vertex]
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pos
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
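        # Sift the element at index start down the min-heap, keeping the vertex-position table in sync.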
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
__SCREAMING_SNAKE_CASE = 2 * start + 2
if heap[smallest_child] < heap[start]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
heap[start],
positions[start],
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa
__SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,lowerCamelCase )
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
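        # Bubble the element at the given index up toward the root until the heap property is restored.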
__SCREAMING_SNAKE_CASE = position[index]
while index != 0:
__SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__SCREAMING_SNAKE_CASE = heap[parent]
__SCREAMING_SNAKE_CASE = position[parent]
self.set_position(position[parent] ,lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,lowerCamelCase )
break
__SCREAMING_SNAKE_CASE = parent
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,0 )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1
for i in range(lowerCamelCase ,-1 ,-1 ):
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = positions[0]
__SCREAMING_SNAKE_CASE = sys.maxsize
self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase )
return temp
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
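    # Prim's algorithm: grow the spanning tree from vertex 0 using a min-heap of tentative edge weights.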
__SCREAMING_SNAKE_CASE = Heap()
__SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE = []
for vertex in range(len(__UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(__UpperCAmelCase )
heap.node_position.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = distance
heap.heapify(__UpperCAmelCase , __UpperCAmelCase )
for _ in range(1 , len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__UpperCAmelCase )]
):
__SCREAMING_SNAKE_CASE = distance
heap.bottom_to_top(
__UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
a = int(input("Enter number of edges: ").strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
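    # Map the original MSN checkpoint parameter names onto the Hugging Face ViT naming scheme.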
__SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
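    # Split each block's fused qkv projection into separate query, key and value weights.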
for i in range(config.num_hidden_layers ):
if base_model:
__SCREAMING_SNAKE_CASE = """"""
else:
__SCREAMING_SNAKE_CASE = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
__SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
__SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
__SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = dct.pop(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = val
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
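    # Build a config for the checkpoint variant, remap and load the weights, then verify the hidden states.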
__SCREAMING_SNAKE_CASE = ViTMSNConfig()
__SCREAMING_SNAKE_CASE = 1000
__SCREAMING_SNAKE_CASE = """datasets/huggingface/label-files"""
__SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 1536
__SCREAMING_SNAKE_CASE = 6
elif "l16" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 0.1
elif "b4" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 4
elif "l7" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 7
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 4096
__SCREAMING_SNAKE_CASE = 24
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = ViTMSNModel(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location="""cpu""" )["""target_encoder"""]
__SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size )
remove_projection_head(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , base_model=__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
__SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
__SCREAMING_SNAKE_CASE = ViTImageProcessor(
size=config.image_size , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = image_processor(images=__UpperCAmelCase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
__SCREAMING_SNAKE_CASE = model(**__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
__SCREAMING_SNAKE_CASE = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __UpperCAmelCase , atol=1e-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
a = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 13 |
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
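    # Read one raw keystroke: uses msvcrt on Windows (handling two-byte prefix codes) and raw terminal mode on POSIX.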
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE = cha[1]
else:
__SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
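    # Translate raw keystrokes into logical keys, decoding ANSI escape sequences for arrow keys.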
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 13 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 |
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
__SCREAMING_SNAKE_CASE = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE = midpoint - 1
else:
__SCREAMING_SNAKE_CASE = midpoint + 1
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
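# Quick sanity examples (illustrative only; `bisect_left`, `bisect_right` and
# `binary_search` are the names the helpers above are called by):
#
#     bisect_left([0, 5, 7, 10, 15], 6)    # -> 2 (insertion point before 7)
#     bisect_right([0, 5, 7, 10, 15], 7)   # -> 3 (insertion point after 7)
#     binary_search([0, 5, 7, 10, 15], 7)  # -> 2 (index of the match)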
| 13 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __a ( _snake_case ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = 5
# Realm tok
__SCREAMING_SNAKE_CASE = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""realm_tokenizer""" )
os.makedirs(lowerCamelCase ,exist_ok=lowerCamelCase )
__SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""realm_block_records""" )
os.makedirs(lowerCamelCase ,exist_ok=lowerCamelCase )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""realm_tokenizer""" ) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.array(
[
b"""This is the first record""",
b"""This is the second record""",
b"""This is the third record""",
b"""This is the fourth record""",
b"""This is the fifth record""",
b"""This is a longer longer longer record""",
] ,dtype=lowerCamelCase ,)
return block_records
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RealmRetriever(
block_records=self.get_dummy_block_records() ,tokenizer=self.get_tokenizer() ,)
return retriever
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = self.get_dummy_retriever()
__SCREAMING_SNAKE_CASE = retriever.tokenizer
__SCREAMING_SNAKE_CASE = np.array([0, 3] ,dtype="""long""" )
__SCREAMING_SNAKE_CASE = tokenizer(["""Test question"""] ).input_ids
__SCREAMING_SNAKE_CASE = tokenizer(
["""the fourth"""] ,add_special_tokens=lowerCamelCase ,return_token_type_ids=lowerCamelCase ,return_attention_mask=lowerCamelCase ,).input_ids
__SCREAMING_SNAKE_CASE = config.reader_seq_len
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever(
lowerCamelCase ,lowerCamelCase ,answer_ids=lowerCamelCase ,max_length=lowerCamelCase ,return_tensors="""np""" )
self.assertEqual(len(lowerCamelCase ) ,2 )
self.assertEqual(len(lowerCamelCase ) ,2 )
self.assertEqual(len(lowerCamelCase ) ,2 )
self.assertEqual(concat_inputs.input_ids.shape ,(2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape ,(2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape ,(2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape ,(2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) ,["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] ,)
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) ,["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] ,)
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = self.get_dummy_retriever()
__SCREAMING_SNAKE_CASE = retriever.tokenizer
__SCREAMING_SNAKE_CASE = np.array([0, 3, 5] ,dtype="""long""" )
__SCREAMING_SNAKE_CASE = tokenizer(["""Test question"""] ).input_ids
__SCREAMING_SNAKE_CASE = tokenizer(
["""the fourth""", """longer longer"""] ,add_special_tokens=lowerCamelCase ,return_token_type_ids=lowerCamelCase ,return_attention_mask=lowerCamelCase ,).input_ids
__SCREAMING_SNAKE_CASE = config.reader_seq_len
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever(
lowerCamelCase ,lowerCamelCase ,answer_ids=lowerCamelCase ,max_length=lowerCamelCase ,return_tensors="""np""" )
self.assertEqual([False, True, True] ,lowerCamelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] ,lowerCamelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname ,"""realm_block_records""" ) )
# Test local path
__SCREAMING_SNAKE_CASE = retriever.from_pretrained(os.path.join(self.tmpdirname ,"""realm_block_records""" ) )
self.assertEqual(retriever.block_records[0] ,b"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
__SCREAMING_SNAKE_CASE = os.path.join(
os.path.join(self.tmpdirname ,"""realm_block_records""" ) ,_REALM_BLOCK_RECORDS_FILENAME )
__SCREAMING_SNAKE_CASE = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0] ,b"""This is the first record""" )
| 13 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : int = 'linear'
__UpperCamelCase : Tuple = 'cosine'
__UpperCamelCase : Tuple = 'cosine_with_restarts'
__UpperCamelCase : List[Any] = 'polynomial'
__UpperCamelCase : Optional[Any] = 'constant'
__UpperCamelCase : Optional[int] = 'constant_with_warmup'
__UpperCamelCase : List[Any] = 'piecewise_constant'
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
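# Minimal usage sketch (illustrative only; `get_scheduler` is the assumed
# upstream name of the factory above, and `optimizer` can be any
# torch.optim.Optimizer instance):
#
#     scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
#     for _ in range(1000):
#         optimizer.step()
#         scheduler.step()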
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class __a :
def __init__( self : str ,lowerCamelCase : str ,lowerCamelCase : int=13 ,lowerCamelCase : List[Any]=30 ,lowerCamelCase : str=2 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : int=True ,lowerCamelCase : str=32 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[int]=37 ,lowerCamelCase : int="gelu" ,lowerCamelCase : Optional[int]=0.1 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Union[str, Any]=0.02 ,lowerCamelCase : Any=3 ,lowerCamelCase : int=None ,lowerCamelCase : str=2 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 2
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Dict ,lowerCamelCase : Any ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDeiTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Any ,lowerCamelCase : Any ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDeiTForMaskedImageModeling(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = TFDeiTForMaskedImageModeling(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = TFDeiTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = TFDeiTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __a ( _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : int = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__UpperCamelCase : Optional[int] = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : List[str] = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : Union[str, Any] = False
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDeiTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase ,tf.keras.layers.Dense ) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Any ,lowerCamelCase : Union[str, Any]=False ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super()._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TFDeiTModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def __magic_name__ ( ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase ,return_tensors="""tf""" )
# forward pass
__SCREAMING_SNAKE_CASE = model(**lowerCamelCase )
# verify the logits
__SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,lowerCamelCase ,atol=1E-4 ) )
| 13 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
a = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
a = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
__SCREAMING_SNAKE_CASE = int(re.match(R""".*layer_(\d*).*""" , __UpperCAmelCase )[1] )
layer_number -= 3
return f"""h.{layer_number}.""" + key
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
__SCREAMING_SNAKE_CASE = re.search(R"""[^\d](\d+)$""" , str(__UpperCAmelCase ) )
if bit_search is None:
raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
__SCREAMING_SNAKE_CASE = int(bit_search.groups()[0] )
return bit_size // 8
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
if bloom_config_file == "":
__SCREAMING_SNAKE_CASE = BloomConfig()
else:
__SCREAMING_SNAKE_CASE = BloomConfig.from_json_file(__UpperCAmelCase )
if shard_model:
__SCREAMING_SNAKE_CASE = os.listdir(__UpperCAmelCase )
        __SCREAMING_SNAKE_CASE = sorted(filter(lambda s : s.startswith("""layer""" ) and "model_00" in s , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = {"""weight_map""": {}, """metadata""": {}}
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BloomConfig()
for j, file in enumerate(__UpperCAmelCase ):
print("""Processing file: {}""".format(__UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = None
for i in range(__UpperCAmelCase ):
# load all TP files
__SCREAMING_SNAKE_CASE = file.replace("""model_00""" , f"""model_0{i}""" )
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
__SCREAMING_SNAKE_CASE = list(temp.keys() )
for key in keys:
__SCREAMING_SNAKE_CASE = temp.pop(__UpperCAmelCase )
if tensors is None:
__SCREAMING_SNAKE_CASE = temp
else:
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__SCREAMING_SNAKE_CASE = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__SCREAMING_SNAKE_CASE = torch.cat([tensors[key], temp[key]] , dim=__UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__SCREAMING_SNAKE_CASE = tensors[key] / pretraining_tp
torch.save(
__UpperCAmelCase , os.path.join(
__UpperCAmelCase , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(__UpperCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
__SCREAMING_SNAKE_CASE = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__SCREAMING_SNAKE_CASE = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(__UpperCAmelCase ) ).zfill(5 ) )
__SCREAMING_SNAKE_CASE = BloomConfig()
__SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + """/""" + CONFIG_NAME
__SCREAMING_SNAKE_CASE = total_size
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__UpperCAmelCase , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
__SCREAMING_SNAKE_CASE = json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + """\n"""
f.write(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = BloomModel(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = os.listdir(__UpperCAmelCase )
        __SCREAMING_SNAKE_CASE = sorted(filter(lambda s : s.startswith("""layer""" ) and "model_00" in s , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = None
for i, file in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = None
for i in range(__UpperCAmelCase ):
# load all TP files
__SCREAMING_SNAKE_CASE = file.replace("""model_00""" , f"""model_0{i}""" )
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
__SCREAMING_SNAKE_CASE = list(temp.keys() )
for key in keys:
__SCREAMING_SNAKE_CASE = temp.pop(__UpperCAmelCase )
if tensors is None:
__SCREAMING_SNAKE_CASE = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__SCREAMING_SNAKE_CASE = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__SCREAMING_SNAKE_CASE = torch.cat([tensors[key], temp[key]] , dim=__UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__SCREAMING_SNAKE_CASE = tensors[key] / pretraining_tp
__SCREAMING_SNAKE_CASE = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
__SCREAMING_SNAKE_CASE = set(other_keys.missing_keys )
else:
__SCREAMING_SNAKE_CASE = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
__SCREAMING_SNAKE_CASE = model.to(config.torch_dtype )
torch.save(model.state_dict() , __UpperCAmelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
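# Example invocation (illustrative only; the script name and paths are
# placeholders, not real files):
#
#     python convert_bloom_original_checkpoint_to_pytorch.py \
#         --bloom_checkpoint_path /path/to/megatron_ckpt \
#         --pytorch_dump_folder_path /path/to/output \
#         --shard_model --pretraining_tp 4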
| 13 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
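# Note: the CSS class queried above is tied to Yahoo Finance's current markup;
# if the page layout changes, `soup.find` returns None and this raises an
# AttributeError rather than failing gracefully.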
| 13 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , index + 1 )
if weights[index] <= max_weight:
__SCREAMING_SNAKE_CASE = values[index] + knapsack(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , max_weight - weights[index] , index + 1 )
return max(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
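# Worked example (illustrative only; `knapsack` is the name the recursive
# helper above is called by):
#
#     knapsack([3, 2, 4, 4], [4, 3, 2, 3], 4, 6, 0)  # -> 7
#     # best choice: items 0 and 1 (weight 3 + 2 <= 6, value 4 + 3 = 7)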
| 13 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
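# Worked example (illustrative only; upstream the helper above is typically
# named `is_palindrome`): 121 reversed digit-by-digit is 121 -> True, while
# 123 becomes 321 -> False, and negative numbers are rejected outright.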
| 13 | 1 |
'''simple docstring'''
# Imports
import numpy as np
class __a :
def __init__( self : Dict ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Dict=None ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : Tuple=None ,lowerCamelCase : int=None ):
'''simple docstring'''
self.set_matricies(red=lowerCamelCase ,green=lowerCamelCase ,blue=lowerCamelCase ,red_edge=lowerCamelCase ,nir=lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : int=None ,lowerCamelCase : Any=None ):
'''simple docstring'''
if red is not None:
__SCREAMING_SNAKE_CASE = red
if green is not None:
__SCREAMING_SNAKE_CASE = green
if blue is not None:
__SCREAMING_SNAKE_CASE = blue
if red_edge is not None:
__SCREAMING_SNAKE_CASE = red_edge
if nir is not None:
__SCREAMING_SNAKE_CASE = nir
return True
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[str]="" ,lowerCamelCase : Tuple=None ,lowerCamelCase : int=None ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : List[str]=None ,lowerCamelCase : str=None ):
'''simple docstring'''
self.set_matricies(red=lowerCamelCase ,green=lowerCamelCase ,blue=lowerCamelCase ,red_edge=lowerCamelCase ,nir=lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : List[str]=0.08 ,lowerCamelCase : Dict=1.22 ,lowerCamelCase : int=0.03 ):
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Union[str, Any]=0.16 ):
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Optional[int]=0.5 ):
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Union[str, Any]=None ):
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__SCREAMING_SNAKE_CASE = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
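# Usage sketch (illustrative only; the class and dispatcher above carry
# placeholder names -- upstream they are assumed to be `IndexCalculation` and
# `calculation(index, ...)` -- and inputs are assumed to be equal-shape
# NumPy arrays):
#
#     red, nir = np.random.rand(4, 4), np.random.rand(4, 4)
#     calc = IndexCalculation(red=red, nir=nir)
#     ndvi_map = calc.calculation("NDVI")  # elementwise (nir - red) / (nir + red)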
| 13 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a = list[list[float | int]]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for row in range(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = matrix[row][col]
__SCREAMING_SNAKE_CASE = vector[row][0]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
        __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
__SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCAmelCase ):
for row in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase )
]
def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for x_val, y_val in enumerate(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
__SCREAMING_SNAKE_CASE = y_val
__SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase )
def interpolated_func(__UpperCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__UpperCAmelCase ) )
return interpolated_func
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )]
__SCREAMING_SNAKE_CASE = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for poly in polynomials:
__SCREAMING_SNAKE_CASE = 1
while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ):
x_val += 1
ret += poly(__UpperCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
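# Context note: this is Project Euler problem 101 -- for each k, fit the
# optimum polynomial through the first k data points of the degree-10
# generating function above, and sum the first incorrect terms (FITs).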
| 13 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __a ( _snake_case ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(lowerCamelCase ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda x : -x[0] )
]
return result
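# Usage sketch (illustrative only; upstream this pipeline is registered as
# "zero-shot-image-classification" and pairs with CLIP-style checkpoints):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#     # -> [{"score": ..., "label": ...}, ...] sorted by descending score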
| 13 | 1 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
__SCREAMING_SNAKE_CASE = TOKENIZER_CLASSES
else:
__SCREAMING_SNAKE_CASE = {tokenizer_name: getattr(__UpperCAmelCase , tokenizer_name + """Fast""" )}
logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
__SCREAMING_SNAKE_CASE = TOKENIZER_CLASSES[tokenizer_name]
__SCREAMING_SNAKE_CASE = True
if checkpoint_name is None:
__SCREAMING_SNAKE_CASE = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__SCREAMING_SNAKE_CASE = [checkpoint_name]
logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
__SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(__UpperCAmelCase , force_download=__UpperCAmelCase )
# Save fast tokenizer
logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = checkpoint.split("""/""" )
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
elif add_prefix:
__SCREAMING_SNAKE_CASE = checkpoint
__SCREAMING_SNAKE_CASE = dump_path
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = dump_path
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__SCREAMING_SNAKE_CASE = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__SCREAMING_SNAKE_CASE = file_path.split(__UpperCAmelCase )[-1][0]
if next_char == "/":
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = None
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
__SCREAMING_SNAKE_CASE = tokenizer.save_pretrained(
__UpperCAmelCase , legacy_format=__UpperCAmelCase , filename_prefix=__UpperCAmelCase )
logger.info(f"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith("""tokenizer.json""" ):
os.remove(__UpperCAmelCase )
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
a = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
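# Example invocation (hypothetical file/checkpoint names), run from the repo root:
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name Bert --checkpoint_name bert-base-uncased \
#       --dump_path ./converted-fast --force_download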
| 13 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
require_version(deps[pkg] , __UpperCAmelCase )
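# Usage sketch (assumes a "tqdm" pin exists in `deps`): check one optional
# dependency on demand, showing a custom hint if the version check fails.
#   __magic_name__("tqdm", "try: pip install -U tqdm")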
| 13 | 1 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a = "src/transformers"
a = "docs/source/en/tasks"
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__SCREAMING_SNAKE_CASE = f.readlines()
# Find the start prompt.
__SCREAMING_SNAKE_CASE = 0
while not lines[start_index].startswith(__UpperCAmelCase ):
start_index += 1
start_index += 1
__SCREAMING_SNAKE_CASE = start_index
while not lines[end_index].startswith(__UpperCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a = direct_transformers_import(TRANSFORMERS_PATH)
a = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TASK_GUIDE_TO_MODELS[task_guide]
__SCREAMING_SNAKE_CASE = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__UpperCAmelCase , set() )
__SCREAMING_SNAKE_CASE = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _find_text_in_file(
filename=os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
__SCREAMING_SNAKE_CASE = get_model_list_for_task(__UpperCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
""" to fix this.""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 13 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
a = logging.getLogger(__name__)
@dataclass
class __a :
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
__UpperCamelCase : int = field(
default=1024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__UpperCamelCase : str = field(
default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
__UpperCamelCase : str = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
__SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__SCREAMING_SNAKE_CASE = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
__SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
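# Input format sketch (hypothetical table): a `table_text` field such as
#   "city#population\nParis#2.1M\nBerlin#3.6M"
# is split row-wise on "\n" and cell-wise on "#", yielding a 2x2 DataFrame
# with columns ["city", "population"] that the TAPEX tokenizer flattens.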
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` column because it contains -1 and Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 13 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'camembert'
def __init__( self : int ,lowerCamelCase : List[Any]=3_0522 ,lowerCamelCase : List[Any]=768 ,lowerCamelCase : str=12 ,lowerCamelCase : List[str]=12 ,lowerCamelCase : Optional[Any]=3072 ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=512 ,lowerCamelCase : Dict=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : List[Any]=1E-1_2 ,lowerCamelCase : Union[str, Any]=1 ,lowerCamelCase : Optional[Any]=0 ,lowerCamelCase : List[Any]=2 ,lowerCamelCase : List[str]="absolute" ,lowerCamelCase : int=True ,lowerCamelCase : Any=None ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class __a ( _snake_case ):
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
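# The dynamic axes above mark which ONNX input dimensions may vary at
# export time: batch size and sequence length always, plus the choice
# dimension for the multiple-choice task.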
| 13 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = (32, 32)
__SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCamelCase )
return image
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=lowerCamelCase ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
return CLIPTextModel(lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
__SCREAMING_SNAKE_CASE = DDPMScheduler()
__SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = self.dummy_vae
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCamelCase ,low_res_scheduler=lowerCamelCase ,scheduler=lowerCamelCase ,vae=lowerCamelCase ,text_encoder=lowerCamelCase ,tokenizer=lowerCamelCase ,max_noise_level=350 ,)
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] ,image=lowerCamelCase ,generator=lowerCamelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] ,image=lowerCamelCase ,generator=lowerCamelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase ,)[0]
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__SCREAMING_SNAKE_CASE = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
__SCREAMING_SNAKE_CASE = DDPMScheduler()
__SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = self.dummy_vae
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCamelCase ,low_res_scheduler=lowerCamelCase ,scheduler=lowerCamelCase ,vae=lowerCamelCase ,text_encoder=lowerCamelCase ,tokenizer=lowerCamelCase ,max_noise_level=350 ,)
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] ,image=lowerCamelCase ,generator=lowerCamelCase ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
__SCREAMING_SNAKE_CASE = DDPMScheduler()
__SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = self.dummy_vae
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
__SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__SCREAMING_SNAKE_CASE = unet.half()
__SCREAMING_SNAKE_CASE = text_encoder.half()
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCamelCase ,low_res_scheduler=lowerCamelCase ,scheduler=lowerCamelCase ,vae=lowerCamelCase ,text_encoder=lowerCamelCase ,tokenizer=lowerCamelCase ,max_noise_level=350 ,)
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] ,image=lowerCamelCase ,generator=lowerCamelCase ,num_inference_steps=2 ,output_type="""np""" ,).images
__SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-x4-upscaler"""
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = """a cat sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase ,image=lowerCamelCase ,generator=lowerCamelCase ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-x4-upscaler"""
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase ,torch_dtype=torch.floataa ,)
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = """a cat sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase ,image=lowerCamelCase ,generator=lowerCamelCase ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-x4-upscaler"""
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
lowerCamelCase ,torch_dtype=torch.floataa ,)
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE = """a cat sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase ,image=lowerCamelCase ,generator=lowerCamelCase ,num_inference_steps=5 ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
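# Note: enable_sequential_cpu_offload() keeps only the currently executing
# submodule on the GPU, which is why peak memory stays under the ~2.9 GB
# bound asserted above, at the cost of slower inference.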
| 13 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase , params=__UpperCAmelCase ).content , """html.parser""" )
__SCREAMING_SNAKE_CASE = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
__SCREAMING_SNAKE_CASE = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
a = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
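# Expected output sketch: the third anchor in the "gs_fl" footer row is the
# "Cited by N" link, so this prints something like "Cited by 123"
# (the exact count is hypothetical and changes over time).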
| 13 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
a = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
a = "▁"
# Segments (not really needed)
a = 0
a = 1
a = 2
a = 3
a = 4
class __a ( _snake_case ):
__UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Dict = 'left'
__UpperCamelCase : Optional[int] = XLNetTokenizer
def __init__( self : List[str] ,lowerCamelCase : Any=None ,lowerCamelCase : Tuple=None ,lowerCamelCase : Optional[Any]=False ,lowerCamelCase : int=True ,lowerCamelCase : str=False ,lowerCamelCase : Union[str, Any]="<s>" ,lowerCamelCase : Optional[int]="</s>" ,lowerCamelCase : List[str]="<unk>" ,lowerCamelCase : Optional[int]="<sep>" ,lowerCamelCase : Any="<pad>" ,lowerCamelCase : str="<cls>" ,lowerCamelCase : Dict="<mask>" ,lowerCamelCase : List[Any]=["<eop>", "<eod>"] ,**lowerCamelCase : int ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AddedToken(lowerCamelCase ,lstrip=lowerCamelCase ,rstrip=lowerCamelCase ) if isinstance(lowerCamelCase ,lowerCamelCase ) else mask_token
super().__init__(
vocab_file=lowerCamelCase ,tokenizer_file=lowerCamelCase ,do_lower_case=lowerCamelCase ,remove_space=lowerCamelCase ,keep_accents=lowerCamelCase ,bos_token=lowerCamelCase ,eos_token=lowerCamelCase ,unk_token=lowerCamelCase ,sep_token=lowerCamelCase ,pad_token=lowerCamelCase ,cls_token=lowerCamelCase ,mask_token=lowerCamelCase ,additional_special_tokens=lowerCamelCase ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = remove_space
__SCREAMING_SNAKE_CASE = keep_accents
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : List[int] ,lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
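# Layout note: unlike BERT, XLNet appends its special tokens at the END:
#   single sequence:   A <sep> <cls>
#   pair of sequences: A <sep> B <sep> <cls>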
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[int] ,lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : str ,lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file ,lowerCamelCase )
return (out_vocab_file,)
| 13 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'camembert'
def __init__( self : int ,lowerCamelCase : List[Any]=3_0522 ,lowerCamelCase : List[Any]=768 ,lowerCamelCase : str=12 ,lowerCamelCase : List[str]=12 ,lowerCamelCase : Optional[Any]=3072 ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=512 ,lowerCamelCase : Dict=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : List[Any]=1E-1_2 ,lowerCamelCase : Union[str, Any]=1 ,lowerCamelCase : Optional[Any]=0 ,lowerCamelCase : List[Any]=2 ,lowerCamelCase : List[str]="absolute" ,lowerCamelCase : int=True ,lowerCamelCase : Any=None ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class __a ( _snake_case ):
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 13 | 1 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1024 , __UpperCAmelCase=1024 , __UpperCAmelCase=False , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = SeqaSeqDataset(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , type_path="""train""" , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = tok.pad_token_id
def get_lens(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = tqdm(
DataLoader(__UpperCAmelCase , batch_size=512 , num_workers=8 , shuffle=__UpperCAmelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__SCREAMING_SNAKE_CASE = []
for batch in dl:
__SCREAMING_SNAKE_CASE = batch["""input_ids"""].ne(__UpperCAmelCase ).sum(1 ).tolist()
__SCREAMING_SNAKE_CASE = batch["""labels"""].ne(__UpperCAmelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__UpperCAmelCase , __UpperCAmelCase ):
max_lens.append(max(__UpperCAmelCase , __UpperCAmelCase ) )
else:
max_lens.extend(__UpperCAmelCase )
return max_lens
__SCREAMING_SNAKE_CASE = get_lens(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = SeqaSeqDataset(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , type_path="""val""" , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = get_lens(__UpperCAmelCase )
pickle_save(__UpperCAmelCase , train_ds.len_file )
pickle_save(__UpperCAmelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
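# Example invocation via fire (hypothetical tokenizer/data paths): positional
# CLI arguments map onto the function parameters in order.
#   python save_len_file.py t5-small ./data --max_source_length 512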
| 13 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __a ( unittest.TestCase ):
def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ):
return model(pixel_values=lowerCamelCase ,**lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase )
| 13 | 1 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OmegaConf.load(__UpperCAmelCase )
if display:
print(yaml.dump(OmegaConf.to_container(__UpperCAmelCase ) ) )
return config
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
if conf_path is None:
__SCREAMING_SNAKE_CASE = """./model_checkpoints/vqgan_only.yaml"""
__SCREAMING_SNAKE_CASE = load_config(__UpperCAmelCase , display=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = VQModel(**config.model.params )
if ckpt_path is None:
__SCREAMING_SNAKE_CASE = """./model_checkpoints/vqgan_only.pt"""
__SCREAMING_SNAKE_CASE = torch.load(__UpperCAmelCase , map_location=__UpperCAmelCase )
if ".ckpt" in ckpt_path:
__SCREAMING_SNAKE_CASE = sd["""state_dict"""]
model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
model.to(__UpperCAmelCase )
del sd
return model
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model.encode(__UpperCAmelCase )
print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
__SCREAMING_SNAKE_CASE = model.decode(__UpperCAmelCase )
return xrec
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = string.rsplit(""".""" , 1 )
if reload:
__SCREAMING_SNAKE_CASE = importlib.import_module(__UpperCAmelCase )
importlib.reload(__UpperCAmelCase )
return getattr(importlib.import_module(__UpperCAmelCase , package=__UpperCAmelCase ) , cls )
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
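# Example config (hypothetical values): `instantiate_from_config` resolves
# the dotted path under "target" and calls it with "params" as kwargs, e.g.
#   {"target": "taming.models.vqgan.VQModel", "params": {"embed_dim": 256}}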
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = instantiate_from_config(__UpperCAmelCase )
if sd is not None:
model.load_state_dict(__UpperCAmelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if ckpt:
__SCREAMING_SNAKE_CASE = torch.load(__UpperCAmelCase , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = pl_sd["""global_step"""]
print(f"""loaded model from global step {global_step}.""" )
else:
__SCREAMING_SNAKE_CASE = {"""state_dict""": None}
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=__UpperCAmelCase , eval_mode=__UpperCAmelCase )["""model"""]
return model, global_step
| 13 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'sew'
def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 13 | 1 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __a ( unittest.TestCase ):
__UpperCamelCase : int = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[str] ,lowerCamelCase : str ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" )
__SCREAMING_SNAKE_CASE = VideoClassificationPipeline(model=lowerCamelCase ,image_processor=lowerCamelCase ,top_k=2 )
__SCREAMING_SNAKE_CASE = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[int] ):
'''simple docstring'''
for example in examples:
__SCREAMING_SNAKE_CASE = video_classifier(lowerCamelCase )
self.assertEqual(
lowerCamelCase ,[
{"""score""": ANY(lowerCamelCase ), """label""": ANY(lowerCamelCase )},
{"""score""": ANY(lowerCamelCase ), """label""": ANY(lowerCamelCase )},
] ,)
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
__SCREAMING_SNAKE_CASE = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} ,crop_size={"""height""": 10, """width""": 10} )
__SCREAMING_SNAKE_CASE = pipeline(
"""video-classification""" ,model=lowerCamelCase ,feature_extractor=lowerCamelCase ,frame_sampling_rate=4 )
__SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" )
__SCREAMING_SNAKE_CASE = video_classifier(lowerCamelCase ,top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ,decimals=4 ) ,[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] ,)
__SCREAMING_SNAKE_CASE = video_classifier(
[
video_file_path,
video_file_path,
] ,top_k=2 ,)
self.assertEqual(
nested_simplify(lowerCamelCase ,decimals=4 ) ,[
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] ,)
@require_tf
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
| 13 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase = 1 , __UpperCAmelCase = 1000 ) -> int:
'''simple docstring'''
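    # Project Euler 26: for each d up to `digit`, long-divide 1/d and record the remainders
    # seen; the first repeated remainder marks the recurring cycle, and we keep the d
    # whose cycle is longest.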
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 0
for divide_by_number in range(__UpperCAmelCase , digit + 1 ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = divide_by_number
else:
has_been_divided.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = now_divide * 10 % divide_by_number
return the_digit
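# For the default bound of 1000 the result should be 983, the known Project Euler 26 answer.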
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
a = (3, 9, -11, 0, 7, 5, 1, -1)
a = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __a :
__UpperCamelCase : int
__UpperCamelCase : Node | None
class __a :
def __init__( self : Optional[Any] ,lowerCamelCase : Iterable[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
for i in sorted(lowerCamelCase ,reverse=lowerCamelCase ):
__SCREAMING_SNAKE_CASE = Node(lowerCamelCase ,self.head )
def __iter__( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.head
while node:
yield node.data
__SCREAMING_SNAKE_CASE = node.next_node
def __len__( self : Optional[int] ):
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self : List[Any] ):
'''simple docstring'''
return " -> ".join([str(lowerCamelCase ) for node in self] )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> SortedLinkedList:
'''simple docstring'''
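    # Merge by concatenation: the SortedLinkedList constructor re-sorts the combined elements.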
return SortedLinkedList(list(__UpperCAmelCase ) + list(__UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
a = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 13 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __a ( unittest.TestCase ):
def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ):
'''simple docstring'''
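        # Build a batch of random test images as PIL images, numpy arrays, or torch tensors,
        # either at a fixed resolution or with per-image random sizes.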
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) )
else:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 )
image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
if torchify:
__SCREAMING_SNAKE_CASE = [torch.from_numpy(lowerCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 3
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 13 | 1 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : int = 'linear'
__UpperCamelCase : Tuple = 'cosine'
__UpperCamelCase : Tuple = 'cosine_with_restarts'
__UpperCamelCase : List[Any] = 'polynomial'
__UpperCamelCase : Optional[Any] = 'constant'
__UpperCamelCase : Optional[int] = 'constant_with_warmup'
__UpperCamelCase : List[Any] = 'piecewise_constant'
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
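    # `step_rules` is "step:multiplier,step:multiplier,...,default", e.g. "1:10,2:0.1,0.01"
    # (illustrative): the lr is multiplied by 10 below step 1, by 0.1 below step 2, and by
    # 0.01 from then on.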
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
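    # Polynomial decay from lr_init down to lr_end after warmup:
    # lr(t) = (lr_init - lr_end) * (1 - t/T)**power + lr_end, returned as a fraction of
    # lr_init because LambdaLR multiplies the factor by the optimizer's initial lr.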
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
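    # Unified factory: resolve the SchedulerType, then validate and forward the
    # warmup/training-step arguments each schedule needs.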
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
| 13 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
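    # Decorator that times a call with timeit.default_timer and returns the elapsed
    # seconds rather than the wrapped function's result.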
def wrapper(*__UpperCAmelCase , **__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = timeit.default_timer()
__SCREAMING_SNAKE_CASE = func(*__UpperCAmelCase , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = timeit.default_timer() - starttime
return delta
__SCREAMING_SNAKE_CASE = func.__name__
return wrapper
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = seq_shapes or {}
for i in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__UpperCAmelCase , _ArrayXD ):
__SCREAMING_SNAKE_CASE = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__UpperCAmelCase , datasets.Value ):
if v.dtype == "string":
__SCREAMING_SNAKE_CASE = """The small grey turtle was surprisingly fast when challenged."""
else:
__SCREAMING_SNAKE_CASE = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__UpperCAmelCase , datasets.Sequence ):
while isinstance(__UpperCAmelCase , datasets.Sequence ):
__SCREAMING_SNAKE_CASE = v.feature
__SCREAMING_SNAKE_CASE = seq_shapes[k]
__SCREAMING_SNAKE_CASE = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
__SCREAMING_SNAKE_CASE = data
dummy_data.append((i, example) )
return dummy_data
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )
with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
for key, record in dummy_data:
__SCREAMING_SNAKE_CASE = features.encode_example(__UpperCAmelCase )
writer.write(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
__SCREAMING_SNAKE_CASE = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )
return dataset
| 13 | 1 |
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ).read()
_check_sql_dataset(__UpperCAmelCase , __UpperCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase ).read()
_check_sql_dataset(__UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
with contextlib.closing(sqlitea.connect(__UpperCAmelCase ) ) as con:
__SCREAMING_SNAKE_CASE = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """tmp.sql""" )
__SCREAMING_SNAKE_CASE = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
__SCREAMING_SNAKE_CASE = iter_sql_file(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = iter_sql_file(__UpperCAmelCase )
for rowa, rowa in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """tmp.sql""" )
__SCREAMING_SNAKE_CASE = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
__SCREAMING_SNAKE_CASE = iter_sql_file(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = iter_sql_file(__UpperCAmelCase )
for rowa, rowa in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """tmp.sql""" )
__SCREAMING_SNAKE_CASE = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=__UpperCAmelCase ).read()
with pytest.raises(__UpperCAmelCase ):
SqlDatasetWriter(__UpperCAmelCase , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
| 13 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a = "__DUMMY_TRANSFORMERS_USER__"
a = "Dummy User"
a = "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"
a = "https://hub-ci.huggingface.co"
a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
a = Path("~/.huggingface/hub_ci_token").expanduser()
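# Pytest fixtures that point the test session at the Hugging Face CI hub endpoint and
# create (then clean up) temporary private dataset repos for the dummy CI user.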
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(__UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=__UpperCAmelCase )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfFolder.get_token()
HfFolder.save_token(__UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 13 | 1 |
'''simple docstring'''
from collections import defaultdict
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
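    # DFS returns the size of the subtree rooted at `start`; when a subtree has an even
    # number of nodes, the edge above it is a valid cut (the classic "Even Tree" problem).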
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = True
for v in tree[start]:
if v not in visited:
ret += dfs(__UpperCAmelCase )
if ret % 2 == 0:
cuts.append(__UpperCAmelCase )
return ret
def __magic_name__ ( ) -> Any:
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
a , a = 10, 9
a = defaultdict(list)
a = {}
a = []
a = 0
a = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 13 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 13 | 1 |
'''simple docstring'''
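# Project Euler 31 (coin sums): each helper counts the ways to make `x` pence using coins
# up to its denomination; solution(200) should yield 73682, the known answer.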
def __magic_name__ ( ) -> int:
'''simple docstring'''
return 1
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase = 200 ) -> int:
'''simple docstring'''
return two_pound(__UpperCAmelCase )
if __name__ == "__main__":
print(solution(int(input().strip())))
| 13 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
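# Flax building blocks for a UNet-style model: nearest-neighbor Upsample, strided-conv
# Downsample, and a time-embedding-conditioned ResnetBlock with an optional 1x1 shortcut.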
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape
__SCREAMING_SNAKE_CASE = jax.image.resize(
lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,)
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int = None
__UpperCamelCase : float = 0.0
__UpperCamelCase : bool = None
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype )
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__SCREAMING_SNAKE_CASE = None
if use_nin_shortcut:
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hidden_states
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 )
__SCREAMING_SNAKE_CASE = hidden_states + temb
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
if self.conv_shortcut is not None:
__SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase )
return hidden_states + residual
| 13 | 1 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
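# Dataset <-> JSON I/O: a reader that wraps the Json builder (streaming or map-style) and
# a writer that serializes Arrow batches to JSON lines, optionally with multiprocessing.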
class __a ( _snake_case ):
def __init__( self : List[Any] ,lowerCamelCase : NestedDataStructureLike[PathLike] ,lowerCamelCase : Optional[NamedSplit] = None ,lowerCamelCase : Optional[Features] = None ,lowerCamelCase : str = None ,lowerCamelCase : bool = False ,lowerCamelCase : bool = False ,lowerCamelCase : Optional[str] = None ,lowerCamelCase : Optional[int] = None ,**lowerCamelCase : Optional[int] ,):
'''simple docstring'''
super().__init__(
lowerCamelCase ,split=lowerCamelCase ,features=lowerCamelCase ,cache_dir=lowerCamelCase ,keep_in_memory=lowerCamelCase ,streaming=lowerCamelCase ,num_proc=lowerCamelCase ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = field
__SCREAMING_SNAKE_CASE = path_or_paths if isinstance(lowerCamelCase ,lowerCamelCase ) else {self.split: path_or_paths}
__SCREAMING_SNAKE_CASE = Json(
cache_dir=lowerCamelCase ,data_files=lowerCamelCase ,features=lowerCamelCase ,field=lowerCamelCase ,**lowerCamelCase ,)
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
if self.streaming:
__SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=lowerCamelCase ,download_mode=lowerCamelCase ,verification_mode=lowerCamelCase ,base_path=lowerCamelCase ,num_proc=self.num_proc ,)
__SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split=self.split ,verification_mode=lowerCamelCase ,in_memory=self.keep_in_memory )
return dataset
class __a :
def __init__( self : Tuple ,lowerCamelCase : Dataset ,lowerCamelCase : Union[PathLike, BinaryIO] ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[int] = None ,**lowerCamelCase : str ,):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__SCREAMING_SNAKE_CASE = dataset
__SCREAMING_SNAKE_CASE = path_or_buf
__SCREAMING_SNAKE_CASE = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__SCREAMING_SNAKE_CASE = num_proc
__SCREAMING_SNAKE_CASE = """utf-8"""
__SCREAMING_SNAKE_CASE = to_json_kwargs
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""path_or_buf""" ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""orient""" ,"""records""" )
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""lines""" ,True if orient == """records""" else False )
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""index""" ,False if orient in ["""split""", """table"""] else True )
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""compression""" ,lowerCamelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"""wb""" ,compression=lowerCamelCase ) as buffer:
__SCREAMING_SNAKE_CASE = self._write(file_obj=lowerCamelCase ,orient=lowerCamelCase ,lines=lowerCamelCase ,index=lowerCamelCase ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
""" was passed. Please provide a local path instead.""" )
__SCREAMING_SNAKE_CASE = self._write(
file_obj=self.path_or_buf ,orient=lowerCamelCase ,lines=lowerCamelCase ,index=lowerCamelCase ,**self.to_json_kwargs )
return written
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = args
__SCREAMING_SNAKE_CASE = query_table(
table=self.dataset.data ,key=slice(lowerCamelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
__SCREAMING_SNAKE_CASE = batch.to_pandas().to_json(
path_or_buf=lowerCamelCase ,orient=lowerCamelCase ,lines=lowerCamelCase ,index=lowerCamelCase ,**lowerCamelCase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : BinaryIO ,lowerCamelCase : Optional[int] ,lowerCamelCase : Any ,lowerCamelCase : Union[str, Any] ,**lowerCamelCase : Any ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating json from Arrow format""" ,):
__SCREAMING_SNAKE_CASE = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,lowerCamelCase ,lowerCamelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating json from Arrow format""" ,):
written += file_obj.write(lowerCamelCase )
return written
| 13 |
'''simple docstring'''
import sys
from collections import defaultdict
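# Prim's minimum spanning tree built on a hand-rolled binary min-heap that tracks each
# vertex's heap position so neighbor distances can be decreased in place.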
class __a :
def __init__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.node_position[vertex]
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pos
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
__SCREAMING_SNAKE_CASE = 2 * start + 2
if heap[smallest_child] < heap[start]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
heap[start],
positions[start],
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa
__SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,lowerCamelCase )
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = position[index]
while index != 0:
__SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__SCREAMING_SNAKE_CASE = heap[parent]
__SCREAMING_SNAKE_CASE = position[parent]
self.set_position(position[parent] ,lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,lowerCamelCase )
break
__SCREAMING_SNAKE_CASE = parent
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,0 )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1
for i in range(lowerCamelCase ,-1 ,-1 ):
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = positions[0]
__SCREAMING_SNAKE_CASE = sys.maxsize
self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase )
return temp
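# Hedged aside, not part of the Heap class above: in this 0-indexed array heap the
# children of node i sit at 2*i + 1 and 2*i + 2, so the parent of node i is
# (i - 1) // 2. The branch-wise parent formula used in the sift-up routine (called
# bottom_to_top below) is equivalent, as this small self-contained check shows:
for _i in (1, 2, 3, 4, 5, 6):
    _branchwise = int((_i - 2) / 2) if _i % 2 == 0 else int((_i - 1) / 2)
    assert _branchwise == (_i - 1) // 2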
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Heap()
__SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase )
    __SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase )  # nearest tree vertex recorded for each selected vertex
    # minimum distance from each still-unexplored vertex to its nearest neighbor in the
    # partial tree built so far in the graph
__SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE = []
for vertex in range(len(__UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(__UpperCAmelCase )
heap.node_position.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = distance
heap.heapify(__UpperCAmelCase , __UpperCAmelCase )
for _ in range(1 , len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__UpperCAmelCase )]
):
__SCREAMING_SNAKE_CASE = distance
heap.bottom_to_top(
__UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = vertex
return tree_edges
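# Illustrative usage (hedged; it mirrors the __main__ driver below). For the triangle
# graph 0-1 (weight 1), 1-2 (weight 2), 0-2 (weight 3), i.e.
#
#   adjacency_list = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#
# the function returns the spanning-tree edges [(0, 1), (1, 2)]: it keeps the two
# cheapest edges and drops the weight-3 one.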
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a = int(input("Enter number of edges: ").strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
a = logging.get_logger(__name__) # pylint: disable=invalid-name
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__UpperCAmelCase ):
return ext
raise Exception(
f"""Unable to determine file format from file extension {path}. """
f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
__SCREAMING_SNAKE_CASE = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
__SCREAMING_SNAKE_CASE = PipelineDataFormat.from_str(
format=__UpperCAmelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__UpperCAmelCase , __UpperCAmelCase )
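# Hedged usage sketch: the factory above backs the `run` subcommand registered by the
# class below. A typical shell invocation could look like this (the binary name and
# argument values are illustrative assumptions, not canonical):
#
#   transformers-cli run --task text-classification --input data.csv --format csv \
#       --column text --output predictions.csv --device -1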
class __a ( _snake_case ):
def __init__( self : Tuple ,lowerCamelCase : Pipeline ,lowerCamelCase : PipelineDataFormat ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nlp
__SCREAMING_SNAKE_CASE = reader
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase : ArgumentParser ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parser.add_parser("""run""" ,help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" ,choices=get_supported_tasks() ,help="""Task to run""" )
run_parser.add_argument("""--input""" ,type=lowerCamelCase ,help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" ,type=lowerCamelCase ,help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" ,type=lowerCamelCase ,help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" ,type=lowerCamelCase ,help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" ,type=lowerCamelCase ,help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" ,type=lowerCamelCase ,help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" ,)
run_parser.add_argument(
"""--format""" ,type=lowerCamelCase ,default="""infer""" ,choices=PipelineDataFormat.SUPPORTED_FORMATS ,help="""Input format to read from""" ,)
run_parser.add_argument(
"""--device""" ,type=lowerCamelCase ,default=-1 ,help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" ,)
run_parser.add_argument("""--overwrite""" ,action="""store_true""" ,help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._nlp, []
for entry in self._reader:
__SCREAMING_SNAKE_CASE = nlp(**lowerCamelCase ) if self._reader.is_multi_columns else nlp(lowerCamelCase )
if isinstance(lowerCamelCase ,lowerCamelCase ):
outputs.append(lowerCamelCase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
__SCREAMING_SNAKE_CASE = self._reader.save_binary(lowerCamelCase )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(lowerCamelCase )
| 13 |
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
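# Hedged aside: ARROW_KEY_FLAG (1 << 8 == 256) lifts the arrow-key codes above the
# 0-255 byte range so they can never collide with an ordinary character, and
# subtracting the flag recovers the raw escape-sequence byte, e.g.
#
#   KEYMAP["up"] - ARROW_KEY_FLAG == 65 == ord("A")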
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE = cha[1]
else:
__SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 13 | 1 |
'''simple docstring'''
a = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 13 |
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
__SCREAMING_SNAKE_CASE = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE = midpoint - 1
else:
__SCREAMING_SNAKE_CASE = midpoint + 1
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
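# Hedged doctest-style examples for the helpers above (conventionally bisect_left,
# bisect_right and binary_search), showing the left/right insertion points around a
# run of equal items:
#
#   bisect_left([1, 2, 2, 3], 2)        -> 1    # before the first 2
#   bisect_right([1, 2, 2, 3], 2)       -> 3    # after the last 2
#   binary_search([0, 5, 7, 10, 15], 6) -> None # absent item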
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 13 | 1 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def __magic_name__ ( __UpperCAmelCase ) -> float:
'''simple docstring'''
if num <= 0:
raise ValueError("""math domain error""" )
return quad(__UpperCAmelCase , 0 , __UpperCAmelCase , args=(__UpperCAmelCase) )[0]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
return math.pow(__UpperCAmelCase , z - 1 ) * math.exp(-x )
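# Hedged aside: the integral above is Euler's gamma function,
# Gamma(z) = integral from 0 to inf of x**(z - 1) * exp(-x) dx, so for positive
# integers Gamma(n) == (n - 1)!. For example the quad-based value of Gamma(5) should
# come out at ~24.0 (i.e. 4!), a handy sanity check.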
if __name__ == "__main__":
from doctest import testmod
testmod()
| 13 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : int = 'linear'
__UpperCamelCase : Tuple = 'cosine'
__UpperCamelCase : Tuple = 'cosine_with_restarts'
__UpperCamelCase : List[Any] = 'polynomial'
__UpperCamelCase : Optional[Any] = 'constant'
__UpperCamelCase : Optional[int] = 'constant_with_warmup'
__UpperCamelCase : List[Any] = 'piecewise_constant'
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
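# Hedged example of the step_rules string parsed above: "1:0.05,10:0.1,1.0" keeps the
# lr multiple at 0.05 until step 1, at 0.1 until step 10, and at 1.0 from then on -
# comma-separated "step:multiple" pairs, with the final bare number used for all
# remaining steps.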
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
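# Hedged usage sketch for the dispatcher above (upstream it is exposed as
# get_scheduler; the keyword values here are illustrative):
#
#   scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   # ...then call scheduler.step() once per optimization step.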
| 13 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 13 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert("""RGB""" )
return image
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = dct.pop(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = val
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
__SCREAMING_SNAKE_CASE = torch.cat((q_bias, torch.zeros_like(__UpperCAmelCase , requires_grad=__UpperCAmelCase ), v_bias) )
__SCREAMING_SNAKE_CASE = qkv_bias
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 364 if """coco""" in model_name else 224
__SCREAMING_SNAKE_CASE = BlipaVisionConfig(image_size=__UpperCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=__UpperCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=__UpperCAmelCase ).to_dict()
elif "t5-xl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
__SCREAMING_SNAKE_CASE = BlipaConfig(vision_config=__UpperCAmelCase , text_config=__UpperCAmelCase )
return config, image_size
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
__SCREAMING_SNAKE_CASE = tokenizer("""\n""" , add_special_tokens=__UpperCAmelCase ).input_ids[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_blipa_config(__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = BlipaForConditionalGeneration(__UpperCAmelCase ).eval()
__SCREAMING_SNAKE_CASE = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
__SCREAMING_SNAKE_CASE = """cuda""" if torch.cuda.is_available() else """cpu"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_model_and_preprocess(
name=__UpperCAmelCase , model_type=__UpperCAmelCase , is_eval=__UpperCAmelCase , device=__UpperCAmelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
__SCREAMING_SNAKE_CASE = original_model.state_dict()
__SCREAMING_SNAKE_CASE = create_rename_keys(__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE = state_dict.pop(__UpperCAmelCase )
if key.startswith("""Qformer.bert""" ):
__SCREAMING_SNAKE_CASE = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
__SCREAMING_SNAKE_CASE = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
__SCREAMING_SNAKE_CASE = key.replace("""t5""" , """language""" )
__SCREAMING_SNAKE_CASE = val
# read in qv biases
read_in_q_v_bias(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hf_model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert len(__UpperCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE = load_demo_image()
__SCREAMING_SNAKE_CASE = vis_processors["""eval"""](__UpperCAmelCase ).unsqueeze(0 ).to(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(__UpperCAmelCase )
# create processor
__SCREAMING_SNAKE_CASE = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = BlipaProcessor(image_processor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = processor(images=__UpperCAmelCase , return_tensors="""pt""" ).pixel_values.to(__UpperCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase )
original_model.to(__UpperCAmelCase )
hf_model.to(__UpperCAmelCase )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
__SCREAMING_SNAKE_CASE = hf_model(__UpperCAmelCase , __UpperCAmelCase ).logits
else:
__SCREAMING_SNAKE_CASE = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
__SCREAMING_SNAKE_CASE = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__SCREAMING_SNAKE_CASE = hf_model(__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , __UpperCAmelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__UpperCAmelCase )
else:
# cast to same type
__SCREAMING_SNAKE_CASE = logits.dtype
assert torch.allclose(original_logits.to(__UpperCAmelCase ) , __UpperCAmelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
__SCREAMING_SNAKE_CASE = """"""
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , return_tensors="""pt""" ).input_ids.to(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = original_model.generate({"""image""": original_pixel_values} )
__SCREAMING_SNAKE_CASE = hf_model.generate(
__UpperCAmelCase , __UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = input_ids.shape[1]
__SCREAMING_SNAKE_CASE = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [text.strip() for text in output_text]
print("""HF generation:""" , __UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCAmelCase )
hf_model.save_pretrained(__UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
a = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
a = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
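# Hedged usage sketch (the script file name is an assumption; the flags are the ones
# registered above):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted \
#       --push_to_hub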
| 13 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 13 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
a = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Any:
'''simple docstring'''
if rng is None:
__SCREAMING_SNAKE_CASE = random.Random()
__SCREAMING_SNAKE_CASE = 1
for dim in shape:
total_dims *= dim
__SCREAMING_SNAKE_CASE = []
for _ in range(__UpperCAmelCase ):
values.append(rng.randint(0 , vocab_size - 1 ) )
__SCREAMING_SNAKE_CASE = np.array(__UpperCAmelCase , dtype=jnp.intaa ).reshape(__UpperCAmelCase )
return output
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ids_tensor(__UpperCAmelCase , vocab_size=2 , rng=__UpperCAmelCase )
# make sure that at least one token is attended to for each batch
__SCREAMING_SNAKE_CASE = 1
return attn_mask
@require_flax
class __a :
__UpperCamelCase : int = None
__UpperCamelCase : Dict = ()
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = inputs["""input_ids"""].shape[-1] // 2
__SCREAMING_SNAKE_CASE = inputs["""input_ids"""][:max_batch_size, :sequence_length]
__SCREAMING_SNAKE_CASE = jnp.ones_like(lowerCamelCase )
__SCREAMING_SNAKE_CASE = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__SCREAMING_SNAKE_CASE = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__SCREAMING_SNAKE_CASE = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = 0
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class.__name__[4:] # Skip the "Flax" at the beginning
__SCREAMING_SNAKE_CASE = getattr(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = pt_model_class(lowerCamelCase ).eval()
__SCREAMING_SNAKE_CASE = load_flax_weights_in_pytorch_model(lowerCamelCase ,flax_model.params )
__SCREAMING_SNAKE_CASE = flax_model.generate(lowerCamelCase ).sequences
__SCREAMING_SNAKE_CASE = pt_model.generate(torch.tensor(lowerCamelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__SCREAMING_SNAKE_CASE = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = 2
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 2
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = 0.8
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = 0.3
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 9
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 9
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 9
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
# pad attention mask on the left
__SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0 )
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,attention_mask=lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ,attention_mask=lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
# pad attention mask on the left
__SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0 )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,attention_mask=lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ,attention_mask=lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_input_ids_and_config()
# pad attention mask on the left
__SCREAMING_SNAKE_CASE = attention_mask.at[(0, 0)].set(0 )
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = max_length
for model_class in self.all_generative_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,attention_mask=lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = jit(model.generate )
__SCREAMING_SNAKE_CASE = jit_generate(lowerCamelCase ,attention_mask=lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
__SCREAMING_SNAKE_CASE = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
__SCREAMING_SNAKE_CASE = """Hello world"""
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ,return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCamelCase ,"""do_samples""" ):
model.generate(lowerCamelCase ,do_samples=lowerCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCamelCase ,"""foo""" ):
__SCREAMING_SNAKE_CASE = {"""foo""": """bar"""}
model.generate(lowerCamelCase ,**lowerCamelCase )
| 13 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
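# Hedged doctest-style examples for the reversal check above:
#
#   121  -> True   (reads the same backwards)
#   123  -> False
#   -101 -> False  (negative numbers are rejected up front)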
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a = list[list[float | int]]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for row in range(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = matrix[row][col]
__SCREAMING_SNAKE_CASE = vector[row][0]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
__SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
__SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCAmelCase ):
for row in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase )
]
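# Hedged worked example for the elimination routine above: solving
#
#   2x + 1y = 5
#   1x + 3y = 10
#
# i.e. matrix = [[2, 1], [1, 3]] and vector = [[5], [10]], gives x = 1, y = 3,
# returned as [[1.0], [3.0]] after the final rounding step.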
def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for x_val, y_val in enumerate(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
__SCREAMING_SNAKE_CASE = y_val
__SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase )
def interpolated_func(__UpperCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__UpperCAmelCase ) )
return interpolated_func
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )]
__SCREAMING_SNAKE_CASE = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for poly in polynomials:
__SCREAMING_SNAKE_CASE = 1
while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ):
x_val += 1
ret += poly(__UpperCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 | 1 |
'''simple docstring'''
from ....utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
def __init__( self : Tuple ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : Optional[Any]=2048 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = config.__dict__
__SCREAMING_SNAKE_CASE = modal_hidden_size
if num_labels:
__SCREAMING_SNAKE_CASE = num_labels
| 13 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __a ( _snake_case ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(lowerCamelCase ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda x : -x[0] )
]
return result
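# Hedged usage sketch (mirrors the preprocess/postprocess contract above; the model
# checkpoint name is an illustrative assumption):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier(
#       "cat.png",
#       candidate_labels=["cat", "dog"],
#       hypothesis_template="This is a photo of {}.",
#   )
#   # -> [{"score": ..., "label": ...}, ...] sorted by descending score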
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
__SCREAMING_SNAKE_CASE = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE = midpoint - 1
else:
__SCREAMING_SNAKE_CASE = midpoint + 1
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
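# Illustrative doctest for the insort helpers above (assuming their original
# names `insort_left` / `insort_right`): they keep a sorted list sorted.
#
#   >>> data = [0, 5, 7, 10, 15]
#   >>> insort_left(data, 6)
#   >>> data
#   [0, 5, 6, 7, 10, 15]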
| 13 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
require_version(deps[pkg] , __UpperCAmelCase )
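# Hedged usage note: the helper above (originally `dep_version_check`) lets a
# submodule assert a single pinned dependency on demand, e.g.
#   dep_version_check("tokenizers")
# which raises with an install hint if the installed version falls outside
# the range recorded in dependency_versions_table.py.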
| 13 | 1 |
'''simple docstring'''
import argparse
import os
import re
a = "src/diffusers"
# Pattern that looks at the indentation in a line.
a = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
a = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
a = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a = re.compile(r"\[([^\]]+)\]")
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = _re_indent.search(__UpperCAmelCase )
return "" if search is None else search.groups()[0]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(__UpperCAmelCase ):
index += 1
__SCREAMING_SNAKE_CASE = ["""\n""".join(lines[:index] )]
else:
__SCREAMING_SNAKE_CASE = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__SCREAMING_SNAKE_CASE = [lines[index]]
index += 1
while index < len(__UpperCAmelCase ) and (end_prompt is None or not lines[index].startswith(__UpperCAmelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__UpperCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(__UpperCAmelCase ) )
if index < len(__UpperCAmelCase ) - 1:
__SCREAMING_SNAKE_CASE = [lines[index + 1]]
index += 1
else:
__SCREAMING_SNAKE_CASE = []
else:
blocks.append("""\n""".join(__UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__UpperCAmelCase ) > 0:
blocks.append("""\n""".join(__UpperCAmelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__UpperCAmelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
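# Note on behaviour (see the caller further below): blocks whose first line
# carries no import key, i.e. blank lines and comments, later get `None` as
# their key and are left exactly where they are, so only keyed blocks are
# reordered around them.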
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
def _inner(__UpperCAmelCase ):
return key(__UpperCAmelCase ).lower().replace("""_""" , """""" )
return _inner
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
    def noop(x ):
        return x
if key is None:
__SCREAMING_SNAKE_CASE = noop
# Constants are all uppercase, they go first.
    __SCREAMING_SNAKE_CASE = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    __SCREAMING_SNAKE_CASE = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    __SCREAMING_SNAKE_CASE = [obj for obj in objects if not key(obj )[0].isupper()]
__SCREAMING_SNAKE_CASE = ignore_underscore(__UpperCAmelCase )
return sorted(__UpperCAmelCase , key=__UpperCAmelCase ) + sorted(__UpperCAmelCase , key=__UpperCAmelCase ) + sorted(__UpperCAmelCase , key=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
    def _replace(match ):
        __SCREAMING_SNAKE_CASE = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
__SCREAMING_SNAKE_CASE = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__SCREAMING_SNAKE_CASE = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(__UpperCAmelCase )] ) + "]"
__SCREAMING_SNAKE_CASE = import_statement.split("""\n""" )
if len(__UpperCAmelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__SCREAMING_SNAKE_CASE = 2 if lines[1].strip() == """[""" else 1
        __SCREAMING_SNAKE_CASE = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        __SCREAMING_SNAKE_CASE = sort_objects(__UpperCAmelCase , key=lambda x : x[1] )
__SCREAMING_SNAKE_CASE = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__UpperCAmelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__SCREAMING_SNAKE_CASE = _re_bracket_content.sub(_replace , lines[1] )
else:
__SCREAMING_SNAKE_CASE = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__SCREAMING_SNAKE_CASE = keys[:-1]
__SCREAMING_SNAKE_CASE = get_indent(lines[1] ) + """, """.join([f"""\"{k}\"""" for k in sort_objects(__UpperCAmelCase )] )
return "\n".join(__UpperCAmelCase )
else:
# Finally we have to deal with imports fitting on one line
__SCREAMING_SNAKE_CASE = _re_bracket_content.sub(_replace , __UpperCAmelCase )
return import_statement
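# Worked example (illustrative) for the function above: sorting puts
# uppercase constants first, capitalized classes second, and lowercase
# functions last, so
#   _import_structure["models"] = ["helper_c", "ModelA", "CONSTANT_B"]
# is rewritten to
#   _import_structure["models"] = ["CONSTANT_B", "ModelA", "helper_c"]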
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=True ) -> str:
'''simple docstring'''
with open(__UpperCAmelCase , """r""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__SCREAMING_SNAKE_CASE = split_code_in_indented_blocks(
__UpperCAmelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__UpperCAmelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__SCREAMING_SNAKE_CASE = main_blocks[block_idx]
__SCREAMING_SNAKE_CASE = block.split("""\n""" )
# Get to the start of the imports.
__SCREAMING_SNAKE_CASE = 0
while line_idx < len(__UpperCAmelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
else:
line_idx += 1
if line_idx >= len(__UpperCAmelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
__SCREAMING_SNAKE_CASE = """\n""".join(block_lines[line_idx:-1] )
__SCREAMING_SNAKE_CASE = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__SCREAMING_SNAKE_CASE = split_code_in_indented_blocks(__UpperCAmelCase , indent_level=__UpperCAmelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
__SCREAMING_SNAKE_CASE = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        __SCREAMING_SNAKE_CASE = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__SCREAMING_SNAKE_CASE = [(i, key) for i, key in enumerate(__UpperCAmelCase ) if key is not None]
        __SCREAMING_SNAKE_CASE = [x[0] for x in sorted(__UpperCAmelCase , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = []
for i in range(len(__UpperCAmelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__SCREAMING_SNAKE_CASE = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(__UpperCAmelCase )
count += 1
# And we put our main block back together with its first and last line.
__SCREAMING_SNAKE_CASE = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(__UpperCAmelCase ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(__UpperCAmelCase , """w""" ) as f:
f.write("""\n""".join(__UpperCAmelCase ) )
def __magic_name__ ( __UpperCAmelCase=True ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for root, _, files in os.walk(__UpperCAmelCase ):
if "__init__.py" in files:
__SCREAMING_SNAKE_CASE = sort_imports(os.path.join(__UpperCAmelCase , """__init__.py""" ) , check_only=__UpperCAmelCase )
if result:
__SCREAMING_SNAKE_CASE = [os.path.join(__UpperCAmelCase , """__init__.py""" )]
if len(__UpperCAmelCase ) > 0:
raise ValueError(f"""Would overwrite {len(__UpperCAmelCase )} files, run `make style`.""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
a = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 13 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
a = logging.getLogger(__name__)
@dataclass
class __a :
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
__UpperCamelCase : int = field(
default=1024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__UpperCamelCase : str = field(
default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
__UpperCamelCase : str = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
__SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__SCREAMING_SNAKE_CASE = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
__SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
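    # Illustrative TabFact table encoding handled by the converter above:
    # "col1#col2\nA#B\nC#D" becomes a DataFrame with columns ["col1", "col2"]
    # and rows [["A", "B"], ["C", "D"]]; rows are newline-separated and cells
    # are '#'-separated, with the first row serving as the header.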
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 13 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> str:
'''simple docstring'''
return "".join(chr(ord(__UpperCAmelCase ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 13 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase ,range(len(lowerCamelCase ) ) ) )
__SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,lowerCamelCase )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ,**lowerCamelCase : int ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCamelCase )
def UpperCAmelCase__ ( self : str ,**lowerCamelCase : int ):
'''simple docstring'''
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
        __SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=lowerCamelCase )
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 13 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase , params=__UpperCAmelCase ).content , """html.parser""" )
__SCREAMING_SNAKE_CASE = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
__SCREAMING_SNAKE_CASE = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
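# Note (as the markup scraping above assumes): on a Google Scholar result the
# third anchor in the `gs_fl` footer row is the "Cited by N" link, so the
# returned string is the citation count for the looked-up paper.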
if __name__ == "__main__":
a = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
a = TypeVar("KT")
a = TypeVar("VT")
class __a ( Generic[KT, VT] ):
def __init__( self : Any ,lowerCamelCase : KT | str = "root" ,lowerCamelCase : VT | None = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = key
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = []
def __repr__( self : int ):
'''simple docstring'''
return f"""Node({self.key}: {self.value})"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return len(self.forward )
class __a ( Generic[KT, VT] ):
def __init__( self : int ,lowerCamelCase : float = 0.5 ,lowerCamelCase : int = 16 ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Node[KT, VT]()
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = p
__SCREAMING_SNAKE_CASE = max_level
def __str__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(self )
if len(lowerCamelCase ) == 0:
return f"""SkipList(level={self.level})"""
        __SCREAMING_SNAKE_CASE = max((len(str(item ) ) for item in items) ,default=4 )
__SCREAMING_SNAKE_CASE = max(lowerCamelCase ,4 ) + 4
__SCREAMING_SNAKE_CASE = self.head
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = node.forward.copy()
lines.append(f"""[{node.key}]""".ljust(lowerCamelCase ,"""-""" ) + """* """ * len(lowerCamelCase ) )
lines.append(""" """ * label_size + """| """ * len(lowerCamelCase ) )
while len(node.forward ) != 0:
__SCREAMING_SNAKE_CASE = node.forward[0]
lines.append(
f"""[{node.key}]""".ljust(lowerCamelCase ,"""-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = node.forward
lines.append("""None""".ljust(lowerCamelCase ) + """* """ * len(lowerCamelCase ) )
return f"""SkipList(level={self.level})\n""" + "\n".join(lowerCamelCase )
def __iter__( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
__SCREAMING_SNAKE_CASE = node.forward[0]
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
while random() < self.p and level < self.max_level:
level += 1
return level
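    # With p = 0.5 the level returned above is geometrically distributed:
    # about half of all nodes get level 1, a quarter level 2, an eighth
    # level 3, and so on, capped at max_level. This gives the classic
    # skip list shape with O(log n) expected search cost.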
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__SCREAMING_SNAKE_CASE = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(lowerCamelCase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : KT ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._locate_node(lowerCamelCase )
if node is not None:
for i, update_node in enumerate(lowerCamelCase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__SCREAMING_SNAKE_CASE = node.forward[i]
else:
__SCREAMING_SNAKE_CASE = update_node.forward[:i]
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : KT ,lowerCamelCase : VT ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._locate_node(lowerCamelCase )
if node is not None:
__SCREAMING_SNAKE_CASE = value
else:
__SCREAMING_SNAKE_CASE = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 ,lowerCamelCase ):
update_vector.append(self.head )
__SCREAMING_SNAKE_CASE = level
__SCREAMING_SNAKE_CASE = Node(lowerCamelCase ,lowerCamelCase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = new_node
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : VT ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._locate_node(lowerCamelCase )
if node is not None:
return node.value
return None
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 12 )
skip_list.insert("""Key3""" , 41 )
skip_list.insert("""Key4""" , -19 )
__SCREAMING_SNAKE_CASE = skip_list.head
__SCREAMING_SNAKE_CASE = {}
while node.level != 0:
__SCREAMING_SNAKE_CASE = node.forward[0]
__SCREAMING_SNAKE_CASE = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __magic_name__ ( ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("""Key1""" , 10 )
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 10 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 10 )
__SCREAMING_SNAKE_CASE = skip_list.head
__SCREAMING_SNAKE_CASE = {}
while node.level != 0:
__SCREAMING_SNAKE_CASE = node.forward[0]
__SCREAMING_SNAKE_CASE = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __magic_name__ ( ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
assert skip_list.find("""Some key""" ) is None
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("""Key2""" , 20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" , 10 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __magic_name__ ( ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __magic_name__ ( ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 142 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""X""" )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __magic_name__ ( ) -> Any:
'''simple docstring'''
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
__SCREAMING_SNAKE_CASE = SkipList()
for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 13 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'camembert'
def __init__( self : int ,lowerCamelCase : List[Any]=3_0522 ,lowerCamelCase : List[Any]=768 ,lowerCamelCase : str=12 ,lowerCamelCase : List[str]=12 ,lowerCamelCase : Optional[Any]=3072 ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=512 ,lowerCamelCase : Dict=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : List[Any]=1E-1_2 ,lowerCamelCase : Union[str, Any]=1 ,lowerCamelCase : Optional[Any]=0 ,lowerCamelCase : List[Any]=2 ,lowerCamelCase : List[str]="absolute" ,lowerCamelCase : int=True ,lowerCamelCase : Any=None ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class __a ( _snake_case ):
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
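# Hedged export sketch: with a config like the above, the stock ONNX export
# entry point can be driven as
#   python -m transformers.onnx --model=camembert-base onnx/
# where the mapping returned by `inputs` tells the exporter which input axes
# are dynamic (batch, plus sequence or choice).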
| 13 | 1 |
'''simple docstring'''
from collections.abc import Sequence
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = False ) -> float:
'''simple docstring'''
if not arr:
return 0
__SCREAMING_SNAKE_CASE = 0 if allow_empty_subarrays else float("""-inf""" )
__SCREAMING_SNAKE_CASE = 0.0
for num in arr:
__SCREAMING_SNAKE_CASE = max(0 if allow_empty_subarrays else num , curr_sum + num )
__SCREAMING_SNAKE_CASE = max(__UpperCAmelCase , __UpperCAmelCase )
return max_sum
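# This is Kadane's algorithm: `curr_sum` holds the best sum of a subarray
# ending at the current element and `max_sum` the best seen anywhere, giving
# O(n) time and O(1) extra space. With allow_empty_subarrays=True an
# all-negative input yields 0; with False it yields the largest element.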
if __name__ == "__main__":
from doctest import testmod
testmod()
a = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'''{max_subarray_sum(nums) = }''')
| 13 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __a ( unittest.TestCase ):
def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ):
return model(pixel_values=lowerCamelCase ,**lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase )
| 13 | 1 |
'''simple docstring'''
import sys
a = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
for digit in s:
product *= int(__UpperCAmelCase )
return product
def __magic_name__ ( __UpperCAmelCase = a ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = -sys.maxsize - 1
__SCREAMING_SNAKE_CASE = n[:13]
__SCREAMING_SNAKE_CASE = 13
while cur_index < len(__UpperCAmelCase ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
__SCREAMING_SNAKE_CASE = substr[1:] + n[cur_index]
cur_index += 1
else:
__SCREAMING_SNAKE_CASE = max(__UpperCAmelCase , str_eval(__UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
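# Usage sketch: solution() slides a 13-digit window over the 1000-digit constant
# above and returns the largest digit product found (Project Euler problem 8).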
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'sew'
def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
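        # cumulative stride of the feature extractor, i.e. how many input samples map
        # to one output frame; for the default conv_stride this is 5 * 2**6 = 320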
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 13 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Union[str, Any] = BartphoTokenizer
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Union[str, Any] = True
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
__SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase ,range(len(lowerCamelCase ) ) ) )
__SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
__SCREAMING_SNAKE_CASE = BartphoTokenizer(lowerCamelCase ,self.monolingual_vocab_file ,**self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : str ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """This is a là test"""
__SCREAMING_SNAKE_CASE = """This is a<unk><unk> test"""
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BartphoTokenizer(lowerCamelCase ,self.monolingual_vocab_file ,**self.special_tokens_map )
__SCREAMING_SNAKE_CASE = """This is a là test"""
__SCREAMING_SNAKE_CASE = """▁This ▁is ▁a ▁l à ▁t est""".split()
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
__SCREAMING_SNAKE_CASE = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,lowerCamelCase )
| 13 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase = 1 , __UpperCAmelCase = 1000 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 0
for divide_by_number in range(__UpperCAmelCase , digit + 1 ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = divide_by_number
else:
has_been_divided.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = now_divide * 10 % divide_by_number
return the_digit
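# Usage sketch: solution(1, 10) should return 7, since 1/7 has the longest
# recurring decimal cycle among denominators up to 10 (Project Euler problem 26).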
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
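    # computes u * (u - 1) * ... * (u - i + 1), the polynomial factor of the i-th
    # term in Newton's forward-difference interpolation formula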
__SCREAMING_SNAKE_CASE = u
for i in range(1 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = temp * (u - i)
return temp
def __magic_name__ ( ) -> None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = int(input("""enter the numbers of values: """ ) )
__SCREAMING_SNAKE_CASE = []
for _ in range(__UpperCAmelCase ):
y.append([] )
for i in range(__UpperCAmelCase ):
for j in range(__UpperCAmelCase ):
y[i].append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = 0
print("""enter the values of parameters in a list: """ )
__SCREAMING_SNAKE_CASE = list(map(__UpperCAmelCase , input().split() ) )
print("""enter the values of corresponding parameters: """ )
for i in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = float(input() )
__SCREAMING_SNAKE_CASE = int(input("""enter the value to interpolate: """ ) )
__SCREAMING_SNAKE_CASE = (value - x[0]) / (x[1] - x[0])
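    # normalised step u for Newton's forward formula; this assumes the x values are
    # equally spaced, so (x[1] - x[0]) is the common spacing h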
# for calculating forward difference table
for i in range(1 , __UpperCAmelCase ):
for j in range(n - i ):
__SCREAMING_SNAKE_CASE = y[j + 1][i - 1] - y[j][i - 1]
__SCREAMING_SNAKE_CASE = y[0][0]
for i in range(1 , __UpperCAmelCase ):
summ += (ucal(__UpperCAmelCase , __UpperCAmelCase ) * y[0][i]) / math.factorial(__UpperCAmelCase )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 13 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __a ( unittest.TestCase ):
def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ):
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) )
else:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 )
image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
            __SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
if torchify:
            __SCREAMING_SNAKE_CASE = [torch.from_numpy(x ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 3
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 13 | 1 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
a = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
a = [ord(letter) for letter in string.ascii_lowercase]
a = {ord(char) for char in VALID_CHARS}
a = ["the", "be", "to", "of", "and", "in", "that", "have"]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ""
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
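    # XOR each ciphertext byte with the cycled 3-character key; abort early as soon
    # as a decoded byte falls outside the printable character set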
for keychar, cipherchar in zip(cycle(__UpperCAmelCase ) , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCAmelCase )
return decoded
def __magic_name__ ( __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for key in product(__UpperCAmelCase , repeat=3 ):
__SCREAMING_SNAKE_CASE = try_key(__UpperCAmelCase , __UpperCAmelCase )
if encoded is not None:
possibles.append(__UpperCAmelCase )
return possibles
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def __magic_name__ ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding="""utf-8""" )
__SCREAMING_SNAKE_CASE = [int(__UpperCAmelCase ) for number in data.strip().split(""",""" )]
__SCREAMING_SNAKE_CASE = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
__SCREAMING_SNAKE_CASE = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
__SCREAMING_SNAKE_CASE = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
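    # decorator: the wrapped function returns the wall-clock time it took to run,
    # in seconds, instead of its original return value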
def wrapper(*__UpperCAmelCase , **__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = timeit.default_timer()
__SCREAMING_SNAKE_CASE = func(*__UpperCAmelCase , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = timeit.default_timer() - starttime
return delta
__SCREAMING_SNAKE_CASE = func.__name__
return wrapper
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = seq_shapes or {}
for i in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__UpperCAmelCase , _ArrayXD ):
__SCREAMING_SNAKE_CASE = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__UpperCAmelCase , datasets.Value ):
if v.dtype == "string":
__SCREAMING_SNAKE_CASE = """The small grey turtle was surprisingly fast when challenged."""
else:
__SCREAMING_SNAKE_CASE = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__UpperCAmelCase , datasets.Sequence ):
while isinstance(__UpperCAmelCase , datasets.Sequence ):
__SCREAMING_SNAKE_CASE = v.feature
__SCREAMING_SNAKE_CASE = seq_shapes[k]
__SCREAMING_SNAKE_CASE = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
__SCREAMING_SNAKE_CASE = data
dummy_data.append((i, example) )
return dummy_data
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )
with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
for key, record in dummy_data:
__SCREAMING_SNAKE_CASE = features.encode_example(__UpperCAmelCase )
writer.write(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
__SCREAMING_SNAKE_CASE = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )
return dataset
| 13 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a = logging.get_logger(__name__)
a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
a = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
a = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class __a ( _snake_case ):
__UpperCamelCase : int = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Any = RoFormerTokenizer
def __init__( self : Optional[int] ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Tuple=True ,lowerCamelCase : List[str]="[UNK]" ,lowerCamelCase : Optional[int]="[SEP]" ,lowerCamelCase : Union[str, Any]="[PAD]" ,lowerCamelCase : List[str]="[CLS]" ,lowerCamelCase : Optional[Any]="[MASK]" ,lowerCamelCase : int=True ,lowerCamelCase : Dict=None ,**lowerCamelCase : List[Any] ,):
'''simple docstring'''
super().__init__(
lowerCamelCase ,tokenizer_file=lowerCamelCase ,do_lower_case=lowerCamelCase ,unk_token=lowerCamelCase ,sep_token=lowerCamelCase ,pad_token=lowerCamelCase ,cls_token=lowerCamelCase ,mask_token=lowerCamelCase ,tokenize_chinese_chars=lowerCamelCase ,strip_accents=lowerCamelCase ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" ,lowerCamelCase ) != do_lower_case
or pre_tok_state.get("""strip_accents""" ,lowerCamelCase ) != strip_accents
):
__SCREAMING_SNAKE_CASE = getattr(lowerCamelCase ,pre_tok_state.pop("""type""" ) )
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = strip_accents
__SCREAMING_SNAKE_CASE = pre_tok_class(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = do_lower_case
def __getstate__( self : Dict ):
'''simple docstring'''
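        # custom (Jieba) pre-tokenizers cannot be pickled, so swap in the default
        # BertPreTokenizer while serializing; __setstate__ restores the Jieba one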
__SCREAMING_SNAKE_CASE = self.__dict__.copy()
__SCREAMING_SNAKE_CASE = BertPreTokenizer()
return state
def __setstate__( self : List[Any] ,lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = d
__SCREAMING_SNAKE_CASE = self.__dict__["""_tokenizer"""].get_vocab()
__SCREAMING_SNAKE_CASE = PreTokenizer.custom(JiebaPreTokenizer(lowerCamelCase ) )
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : int ,lowerCamelCase : List[Any]=None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[int] ,lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ,lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self._tokenizer.model.save(lowerCamelCase ,name=lowerCamelCase )
return tuple(lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : str ,lowerCamelCase : Dict=None ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]=False ,**lowerCamelCase : str ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BertPreTokenizer()
return super().save_pretrained(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,**lowerCamelCase )
| 13 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a = "__DUMMY_TRANSFORMERS_USER__"
a = "Dummy User"
a = "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"
a = "https://hub-ci.huggingface.co"
a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
a = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(__UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=__UpperCAmelCase )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfFolder.get_token()
HfFolder.save_token(__UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 13 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a = 16
a = 32
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = 16 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""bert-base-cased""" )
__SCREAMING_SNAKE_CASE = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
__SCREAMING_SNAKE_CASE = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__SCREAMING_SNAKE_CASE = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__SCREAMING_SNAKE_CASE = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__SCREAMING_SNAKE_CASE = 16
elif accelerator.mixed_precision != "no":
__SCREAMING_SNAKE_CASE = 8
else:
__SCREAMING_SNAKE_CASE = None
return tokenizer.pad(
__UpperCAmelCase , padding="""longest""" , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
__SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["""train"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = DataLoader(
tokenized_datasets["""validation"""] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a = mocked_dataloaders # noqa: F811
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCAmelCase ) == "1":
__SCREAMING_SNAKE_CASE = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__SCREAMING_SNAKE_CASE = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
__SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__SCREAMING_SNAKE_CASE = config["""lr"""]
__SCREAMING_SNAKE_CASE = int(config["""num_epochs"""] )
__SCREAMING_SNAKE_CASE = int(config["""seed"""] )
__SCREAMING_SNAKE_CASE = int(config["""batch_size"""] )
set_seed(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataloaders(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
__SCREAMING_SNAKE_CASE = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__SCREAMING_SNAKE_CASE = batch_size // MAX_GPU_BATCH_SIZE
__SCREAMING_SNAKE_CASE = MAX_GPU_BATCH_SIZE
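    # e.g. batch_size=64 with MAX_GPU_BATCH_SIZE=16 yields 4 accumulation steps of
    # 16 samples each, keeping the effective batch size at 64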
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Instantiate optimizer
__SCREAMING_SNAKE_CASE = AdamW(params=model.parameters() , lr=__UpperCAmelCase )
# Instantiate scheduler
__SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
optimizer=__UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__SCREAMING_SNAKE_CASE = os.path.split(__UpperCAmelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(__UpperCAmelCase , __UpperCAmelCase )
# Now we train the model
for epoch in range(__UpperCAmelCase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__SCREAMING_SNAKE_CASE = 0
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__SCREAMING_SNAKE_CASE = model(**__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
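            # scale the loss so that gradients accumulated over several mini-steps
            # sum to the gradient of the full effective batch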
__SCREAMING_SNAKE_CASE = loss / gradient_accumulation_steps
accelerator.backward(__UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=__UpperCAmelCase , references=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __UpperCAmelCase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(__UpperCAmelCase ),
"""epoch""": epoch,
} , step=__UpperCAmelCase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __magic_name__ ( ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCAmelCase , default=__UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=__UpperCAmelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
main()
| 13 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 13 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __a ( _snake_case ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
        __SCREAMING_SNAKE_CASE = [hypothesis_template.format(x ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
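        # pair each candidate label with its probability and sort by descending score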
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores ,candidate_labels ) ,key=lambda lowerCamelCase : -lowerCamelCase[0] )
]
return result
| 13 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape
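        # nearest-neighbour 2x spatial upsampling (NHWC layout), followed by a 3x3
        # convolution: the classic resize-then-convolve upsampling block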
__SCREAMING_SNAKE_CASE = jax.image.resize(
lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,)
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int = None
__UpperCamelCase : float = 0.0
__UpperCamelCase : bool = None
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype )
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__SCREAMING_SNAKE_CASE = None
if use_nin_shortcut:
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hidden_states
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 )
__SCREAMING_SNAKE_CASE = hidden_states + temb
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
if self.conv_shortcut is not None:
__SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase )
return hidden_states + residual
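# Descriptive note (not in the original sample): the three modules above appear to
# mirror diffusers' FlaxUpsample2D (nearest-neighbour resize to twice the height and
# width, followed by a 3x3 conv), FlaxDownsample2D (a stride-2 3x3 conv that halves
# the spatial dims) and FlaxResnetBlock2D (two GroupNorm -> swish -> conv stages with
# the projected time embedding added in between, plus an optional 1x1 shortcut when
# the input and output channel counts differ).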
| 13 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = (
first_str_length if first_str_length > second_str_length else second_str_length
)
__SCREAMING_SNAKE_CASE = []
for char_count in range(__UpperCAmelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(__UpperCAmelCase )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
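# Derived from the code above (not in the original sample): the call interleaves the
# two strings character by character and prints "AXBYZ".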
| 13 |
'''simple docstring'''
import sys
from collections import defaultdict
class __a :
def __init__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.node_position[vertex]
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pos
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
__SCREAMING_SNAKE_CASE = 2 * start + 2
if heap[smallest_child] < heap[start]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
heap[start],
positions[start],
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa
__SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,lowerCamelCase )
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = position[index]
while index != 0:
__SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__SCREAMING_SNAKE_CASE = heap[parent]
__SCREAMING_SNAKE_CASE = position[parent]
self.set_position(position[parent] ,lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,lowerCamelCase )
break
__SCREAMING_SNAKE_CASE = parent
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,0 )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1
for i in range(lowerCamelCase ,-1 ,-1 ):
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = positions[0]
__SCREAMING_SNAKE_CASE = sys.maxsize
self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase )
return temp
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Heap()
__SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE = []
for vertex in range(len(__UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(__UpperCAmelCase )
heap.node_position.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = distance
heap.heapify(__UpperCAmelCase , __UpperCAmelCase )
for _ in range(1 , len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__UpperCAmelCase )]
):
__SCREAMING_SNAKE_CASE = distance
heap.bottom_to_top(
__UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = vertex
return tree_edges
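# Worked example (not in the original sample): for the weighted triangle with edges
# 0-1 (weight 1), 1-2 (weight 2) and 0-2 (weight 3), i.e. the adjacency list
# {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}, the function
# returns the minimum spanning tree edges [(0, 1), (1, 2)].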
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
a = int(input("Enter number of edges: ").strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = UniSpeechSatForSequenceClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = downstream_dict["""projector.weight"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""projector.bias"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""model.post_net.linear.weight"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""model.post_net.linear.bias"""]
return model
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = UniSpeechSatForAudioFrameClassification.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = downstream_dict["""model.linear.weight"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""model.linear.bias"""]
return model
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = UniSpeechSatForXVector.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = downstream_dict["""connector.weight"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__SCREAMING_SNAKE_CASE = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
__SCREAMING_SNAKE_CASE = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
__SCREAMING_SNAKE_CASE = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.load(__UpperCAmelCase , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = checkpoint["""Downstream"""]
__SCREAMING_SNAKE_CASE = UniSpeechSatConfig.from_pretrained(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , do_normalize=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
__SCREAMING_SNAKE_CASE = convert_classification(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
__SCREAMING_SNAKE_CASE = convert_diarization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
elif arch.endswith("""ForXVector""" ):
__SCREAMING_SNAKE_CASE = convert_xvector(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
__SCREAMING_SNAKE_CASE = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__UpperCAmelCase )
hf_model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
a = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
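# Hedged CLI sketch (not in the original sample; the script and file names are
# placeholders):
#   python convert_unispeech_sat_s3prl.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.pt \
#       --model_dump_path ./converted_model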
| 13 |
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE = cha[1]
else:
__SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
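# Descriptive note (not in the original sample): arrow keys are encoded as their
# ASCII letter plus ARROW_KEY_FLAG (1 << 8), so "up" maps to 65 + 256 = 321. The
# second helper reads raw keystrokes via the first one, folds multi-byte escape
# sequences back into these flagged codes, and returns KEYMAP["undefined"] for
# anything it cannot classify as printable.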
| 13 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __a ( _snake_case ):
def __init__( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int]=13 ,lowerCamelCase : Optional[Any]=7 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : int=True ,lowerCamelCase : Dict=True ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : Optional[int]=99 ,lowerCamelCase : Tuple=32 ,lowerCamelCase : List[str]=5 ,lowerCamelCase : int=4 ,lowerCamelCase : Optional[Any]=37 ,lowerCamelCase : List[Any]="gelu" ,lowerCamelCase : Any=0.1 ,lowerCamelCase : int=0.1 ,lowerCamelCase : Optional[Any]=512 ,lowerCamelCase : Optional[int]=16 ,lowerCamelCase : str=2 ,lowerCamelCase : Union[str, Any]=0.02 ,lowerCamelCase : int=False ,lowerCamelCase : Dict=True ,lowerCamelCase : Any="None" ,lowerCamelCase : Optional[int]=3 ,lowerCamelCase : List[str]=4 ,lowerCamelCase : Union[str, Any]=None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = relative_attention
__SCREAMING_SNAKE_CASE = position_biased_input
__SCREAMING_SNAKE_CASE = pos_att_type
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Dict ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Dict ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DebertaVaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ,token_type_ids=lowerCamelCase )[0]
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,token_type_ids=lowerCamelCase )[0]
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Any ,lowerCamelCase : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ,lowerCamelCase : List[str] ,lowerCamelCase : Any ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DebertaVaForMaskedLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ,lowerCamelCase : Optional[int] ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaVaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Dict ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Dict ,lowerCamelCase : List[Any] ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DebertaVaForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[int] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DebertaVaForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(
lowerCamelCase ,attention_mask=lowerCamelCase ,token_type_ids=lowerCamelCase ,start_positions=lowerCamelCase ,end_positions=lowerCamelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : int ,lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : Dict ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DebertaVaForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
lowerCamelCase ,attention_mask=lowerCamelCase ,token_type_ids=lowerCamelCase ,labels=lowerCamelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __a ( _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCamelCase : Dict = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : str = False
__UpperCamelCase : Dict = False
__UpperCamelCase : int = False
__UpperCamelCase : Union[str, Any] = False
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DebertaVaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = DebertaVaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __a ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase )[0]
# compare the actual values for a slice.
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowerCamelCase ,atol=1E-4 ) ,f"""{output[:, 1:4, 1:4]}""" )
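# Descriptive note (not in the original sample): the integration test above pins a
# 3x3 slice of the microsoft/deberta-v2-xlarge hidden states to reference values at
# an absolute tolerance of 1e-4.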
| 13 |
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
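# Hedged behaviour check (not in the original sample; values derived by tracing the
# two functions above): on a sorted list containing the searched item,
#   bisect_left([0, 5, 7, 10, 15], 5)   # -> 1 (leftmost position for 5)
#   bisect_right([0, 5, 7, 10, 15], 5)  # -> 2 (position just past the existing 5)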
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
__SCREAMING_SNAKE_CASE = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE = midpoint - 1
else:
__SCREAMING_SNAKE_CASE = midpoint + 1
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 13 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for param in module.parameters():
__SCREAMING_SNAKE_CASE = False
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__SCREAMING_SNAKE_CASE = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = plt.imshow(__UpperCAmelCase )
fig.axes.get_xaxis().set_visible(__UpperCAmelCase )
fig.axes.get_yaxis().set_visible(__UpperCAmelCase )
plt.show()
def __magic_name__ ( ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = datetime.now()
__SCREAMING_SNAKE_CASE = current_time.strftime("""%H:%M:%S""" )
return timestamp
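# Descriptive note (not in the original sample): the four helpers above freeze a
# module's parameters, pick the best available device ("cuda", then "mps" with a
# warning, falling back to "cpu"), display an image with both axes hidden, and
# return the current time formatted as HH:MM:SS.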
| 13 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : int = 'linear'
__UpperCamelCase : Tuple = 'cosine'
__UpperCamelCase : Tuple = 'cosine_with_restarts'
__UpperCamelCase : List[Any] = 'polynomial'
__UpperCamelCase : Optional[Any] = 'constant'
__UpperCamelCase : Optional[int] = 'constant_with_warmup'
__UpperCamelCase : List[Any] = 'piecewise_constant'
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
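# Hedged usage sketch (not in the original sample; assumes the final function above
# corresponds to diffusers' get_scheduler, and the model, optimizer and step counts
# are placeholders):
#   import torch
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )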
| 13 | 1 |