Dataset schema (each row below is dumped in this column order: code, code_codestyle, style_context, style_context_codestyle, label):

  code                     string (81 to 54k chars)
  code_codestyle           int64  (0 to 721)
  style_context            string (91 to 41.9k chars)
  style_context_codestyle  int64  (0 to 699)
  label                    int64  (0 to 1)
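As a minimal sketch of consuming such a dump programmatically, assuming it corresponds to a Hugging Face dataset with the five columns above (the dataset path in the example is a hypothetical placeholder, not a real identifier):

from datasets import load_dataset

# Hypothetical dataset path; substitute the actual identifier for this dump.
ds = load_dataset("user/code-style-pairs", split="train")
for row in ds.select(range(3)):
    # Each row carries two code samples plus their style ids and a pair label.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])  # first 80 chars of the code sample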
"""simple docstring""" import numpy as np class __a : '''simple docstring''' def __init__( self , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Dict: """simple docstring""" self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a ) def _a ( self , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Optional[Any]: """simple docstring""" if red is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = red if green is not None: SCREAMING_SNAKE_CASE__ : Tuple = green if blue is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = blue if red_edge is not None: SCREAMING_SNAKE_CASE__ : List[str] = red_edge if nir is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = nir return True def _a ( self , _a="" , _a=None , _a=None , _a=None , _a=None , _a=None ) -> Optional[Any]: """simple docstring""" self.set_matricies(red=_a , green=_a , blue=_a , red_edge=_a , nir=_a ) SCREAMING_SNAKE_CASE__ : Dict = { """ARVI2""": self.arvaa, """CCCI""": self.ccci, """CVI""": self.cvi, """GLI""": self.gli, """NDVI""": self.ndvi, """BNDVI""": self.bndvi, """redEdgeNDVI""": self.red_edge_ndvi, """GNDVI""": self.gndvi, """GBNDVI""": self.gbndvi, """GRNDVI""": self.grndvi, """RBNDVI""": self.rbndvi, """PNDVI""": self.pndvi, """ATSAVI""": self.atsavi, """BWDRVI""": self.bwdrvi, """CIgreen""": self.ci_green, """CIrededge""": self.ci_rededge, """CI""": self.ci, """CTVI""": self.ctvi, """GDVI""": self.gdvi, """EVI""": self.evi, """GEMI""": self.gemi, """GOSAVI""": self.gosavi, """GSAVI""": self.gsavi, """Hue""": self.hue, """IVI""": self.ivi, """IPVI""": self.ipvi, """I""": self.i, """RVI""": self.rvi, """MRVI""": self.mrvi, """MSAVI""": self.m_savi, """NormG""": self.norm_g, """NormNIR""": self.norm_nir, """NormR""": self.norm_r, """NGRDI""": self.ngrdi, """RI""": self.ri, """S""": self.s, """IF""": self._if, """DVI""": self.dvi, """TVI""": self.tvi, """NDRE""": self.ndre, } try: return funcs[index]() except KeyError: print("""Index not in the list!""" ) return False def _a ( self ) -> Optional[int]: """simple docstring""" return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def _a ( self ) -> List[Any]: """simple docstring""" return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _a ( self ) -> Union[str, Any]: """simple docstring""" return self.nir * (self.red / (self.green**2)) def _a ( self ) -> int: """simple docstring""" return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _a ( self ) -> List[str]: """simple docstring""" return (self.nir - self.red) / (self.nir + self.red) def _a ( self ) -> List[str]: """simple docstring""" return (self.nir - self.blue) / (self.nir + self.blue) def _a ( self ) -> Dict: """simple docstring""" return (self.redEdge - self.red) / (self.redEdge + self.red) def _a ( self ) -> List[Any]: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green) def _a ( self ) -> Union[str, Any]: """simple docstring""" return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _a ( self ) -> Optional[Any]: """simple docstring""" return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _a ( self ) -> Optional[Any]: """simple docstring""" return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _a ( self ) -> Optional[Any]: """simple docstring""" return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def 
_a ( self , _a=0.08 , _a=1.22 , _a=0.03 ) -> List[str]: """simple docstring""" return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _a ( self ) -> Optional[Any]: """simple docstring""" return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _a ( self ) -> List[str]: """simple docstring""" return (self.nir / self.green) - 1 def _a ( self ) -> Tuple: """simple docstring""" return (self.nir / self.redEdge) - 1 def _a ( self ) -> List[Any]: """simple docstring""" return (self.red - self.blue) / self.red def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _a ( self ) -> Tuple: """simple docstring""" return self.nir - self.green def _a ( self ) -> Optional[Any]: """simple docstring""" return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red) def _a ( self , _a=0.16 ) -> str: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green + y) def _a ( self , _a=0.5 ) -> Optional[Any]: """simple docstring""" return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _a ( self ) -> List[Any]: """simple docstring""" return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) ) def _a ( self , _a=None , _a=None ) -> Optional[int]: """simple docstring""" return (self.nir - b) / (a * self.red) def _a ( self ) -> Dict: """simple docstring""" return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _a ( self ) -> Optional[Any]: """simple docstring""" return (self.red + self.green + self.blue) / 30.5 def _a ( self ) -> int: """simple docstring""" return self.nir / self.red def _a ( self ) -> Dict: """simple docstring""" return (self.rvi() - 1) / (self.rvi() + 1) def _a ( self ) -> Optional[int]: """simple docstring""" return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _a ( self ) -> List[str]: """simple docstring""" return self.green / (self.nir + self.red + self.green) def _a ( self ) -> Tuple: """simple docstring""" return self.nir / (self.nir + self.red + self.green) def _a ( self ) -> Optional[int]: """simple docstring""" return self.red / (self.nir + self.red + self.green) def _a ( self ) -> str: """simple docstring""" return (self.green - self.red) / (self.green + self.red) def _a ( self ) -> List[Any]: """simple docstring""" return (self.red - self.green) / (self.red + self.green) def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) SCREAMING_SNAKE_CASE__ : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _a ( self ) -> List[Any]: """simple docstring""" return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _a ( self ) -> Dict: """simple docstring""" return self.nir / self.red def _a ( self ) -> Optional[int]: """simple docstring""" return (self.ndvi() + 0.5) ** (1 / 2) def _a ( self ) -> Union[str, Any]: """simple docstring""" return (self.nir - self.redEdge) / (self.nir + self.redEdge)
703
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class __a (unittest.TestCase): '''simple docstring''' @slow def _a ( self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a )["""last_hidden_state"""] SCREAMING_SNAKE_CASE__ : List[str] = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _a ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor( [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
12
0
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase_) class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :str = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True}) _SCREAMING_SNAKE_CASE :ClassVar[Features] = Features({"""question""": Value("""string"""), """context""": Value("""string""")}) _SCREAMING_SNAKE_CASE :ClassVar[Features] = Features( { """answers""": Sequence( { """text""": Value("""string"""), """answer_start""": Value("""int32"""), }) }) _SCREAMING_SNAKE_CASE :str = "question" _SCREAMING_SNAKE_CASE :str = "context" _SCREAMING_SNAKE_CASE :str = "answers" @property def _a ( self ) -> Dict[str, str]: """simple docstring""" return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
704
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a :List[Any] = logging.get_logger(__name__) a :Optional[int] = { "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json", } class __a (UpperCamelCase_ , UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Any = """focalnet""" def __init__( self , _a=224 , _a=4 , _a=3 , _a=96 , _a=False , _a=[192, 384, 768, 768] , _a=[2, 2, 6, 2] , _a=[2, 2, 2, 2] , _a=[3, 3, 3, 3] , _a="gelu" , _a=4.0 , _a=0.0 , _a=0.1 , _a=False , _a=1E-4 , _a=False , _a=False , _a=False , _a=0.02 , _a=1E-5 , _a=32 , _a=None , _a=None , **_a , ) -> Optional[Any]: """simple docstring""" super().__init__(**_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_size SCREAMING_SNAKE_CASE__ : str = patch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE__ : Union[str, Any] = embed_dim SCREAMING_SNAKE_CASE__ : List[str] = use_conv_embed SCREAMING_SNAKE_CASE__ : List[str] = hidden_sizes SCREAMING_SNAKE_CASE__ : Optional[int] = depths SCREAMING_SNAKE_CASE__ : Any = focal_levels SCREAMING_SNAKE_CASE__ : Optional[Any] = focal_windows SCREAMING_SNAKE_CASE__ : Any = hidden_act SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = drop_path_rate SCREAMING_SNAKE_CASE__ : str = use_layerscale SCREAMING_SNAKE_CASE__ : int = layerscale_value SCREAMING_SNAKE_CASE__ : Optional[int] = use_post_layernorm SCREAMING_SNAKE_CASE__ : Any = use_post_layernorm_in_modulation SCREAMING_SNAKE_CASE__ : Union[str, Any] = normalize_modulator SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : Any = layer_norm_eps SCREAMING_SNAKE_CASE__ : Any = encoder_stride SCREAMING_SNAKE_CASE__ : Optional[int] = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = get_aligned_output_features_output_indices( out_features=_a , out_indices=_a , stage_names=self.stage_names )
12
0
"""simple docstring""" import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __a (UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :Any = FlaxAutoencoderKL @property def _a ( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = 4 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = (32, 32) SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ : Any = jax.random.uniform(_a , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_input return init_dict, inputs_dict
705
"""simple docstring""" import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __a (unittest.TestCase): '''simple docstring''' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = seq_length SCREAMING_SNAKE_CASE__ : Optional[int] = is_training SCREAMING_SNAKE_CASE__ : Optional[Any] = use_attention_mask SCREAMING_SNAKE_CASE__ : Tuple = use_token_type_ids SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : int = vocab_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : int = hidden_act SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Dict = type_vocab_size SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE__ : int = initializer_range SCREAMING_SNAKE_CASE__ : Optional[Any] = num_choices def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None if self.use_attention_mask: SCREAMING_SNAKE_CASE__ : int = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Tuple = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = config_and_inputs SCREAMING_SNAKE_CASE__ : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class __a (UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :Any = True 
_SCREAMING_SNAKE_CASE :Optional[Any] = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxRoFormerModelTester(self ) @slow def _a ( self ) -> int: """simple docstring""" for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=_a ) SCREAMING_SNAKE_CASE__ : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a ) @require_flax class __a (unittest.TestCase): '''simple docstring''' @slow def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) SCREAMING_SNAKE_CASE__ : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE__ : str = model(_a )[0] SCREAMING_SNAKE_CASE__ : List[Any] = 50_000 SCREAMING_SNAKE_CASE__ : Optional[Any] = (1, 6, vocab_size) self.assertEqual(output.shape , _a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
12
0
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class __a (UpperCamelCase_): '''simple docstring''' def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> Union[str, Any]: """simple docstring""" super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) SCREAMING_SNAKE_CASE__ : List[Any] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : Optional[int] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits SCREAMING_SNAKE_CASE__ : str = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class __a : '''simple docstring''' def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Any: """simple docstring""" if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) SCREAMING_SNAKE_CASE__ : int = dataset SCREAMING_SNAKE_CASE__ : Any = name SCREAMING_SNAKE_CASE__ : Optional[Any] = con SCREAMING_SNAKE_CASE__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE SCREAMING_SNAKE_CASE__ : int = num_proc SCREAMING_SNAKE_CASE__ : int = to_sql_kwargs def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.to_sql_kwargs.pop("""sql""" , _a ) SCREAMING_SNAKE_CASE__ : Tuple = self.to_sql_kwargs.pop("""con""" , _a ) SCREAMING_SNAKE_CASE__ : Tuple = self.to_sql_kwargs.pop("""index""" , _a ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(index=_a , **self.to_sql_kwargs ) return written def _a ( self , _a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = args SCREAMING_SNAKE_CASE__ : List[str] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs SCREAMING_SNAKE_CASE__ : Any = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) SCREAMING_SNAKE_CASE__ : Optional[int] = batch.to_pandas() SCREAMING_SNAKE_CASE__ : List[Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def _a ( self , _a , **_a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: SCREAMING_SNAKE_CASE__ : str = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
706
"""simple docstring""" a :List[str] = [ (1_000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"), (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"), ] def _lowercase ( __lowerCAmelCase ) -> int: SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000} SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : List[str] = 0 while place < len(__lowerCAmelCase ): if (place + 1 < len(__lowerCAmelCase )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def _lowercase ( __lowerCAmelCase ) -> str: SCREAMING_SNAKE_CASE__ : Any = [] for arabic, roman in ROMAN: ((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : List[str] = divmod(__lowerCAmelCase , __lowerCAmelCase ) result.append(roman * factor ) if number == 0: break return "".join(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __a (UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[Any] = RoCBertTokenizer _SCREAMING_SNAKE_CASE :Tuple = None _SCREAMING_SNAKE_CASE :Dict = False _SCREAMING_SNAKE_CASE :int = True _SCREAMING_SNAKE_CASE :Any = filter_non_english def _a ( self ) -> Optional[int]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] SCREAMING_SNAKE_CASE__ : List[Any] = {} SCREAMING_SNAKE_CASE__ : List[Any] = {} for i, value in enumerate(_a ): SCREAMING_SNAKE_CASE__ : List[Any] = i SCREAMING_SNAKE_CASE__ : str = i SCREAMING_SNAKE_CASE__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer: json.dump(_a , _a , ensure_ascii=_a ) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer: json.dump(_a , _a , ensure_ascii=_a ) def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize("""你好[SEP]你是谁""" ) self.assertListEqual(_a , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_a ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_a ) , [5, 6, 2, 5, 7, 8] ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = RoCBertBasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = RoCBertBasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=_a , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? 
[UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] SCREAMING_SNAKE_CASE__ : Optional[Any] = {} for i, token in enumerate(_a ): SCREAMING_SNAKE_CASE__ : Optional[Any] = i SCREAMING_SNAKE_CASE__ : List[str] = RoCBertWordpieceTokenizer(vocab=_a , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def _a ( self ) -> Any: """simple docstring""" self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def _a ( self ) -> Tuple: """simple docstring""" self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def _a ( self ) -> Any: """simple docstring""" self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) if self.test_rust_tokenizer: SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) def _a ( self ) -> Optional[int]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : List[Any] = self.rust_tokenizer_class.from_pretrained(_a , **_a ) SCREAMING_SNAKE_CASE__ : Dict = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer_r.do_lower_case if hasattr(_a , """do_lower_case""" ) else False SCREAMING_SNAKE_CASE__ : Optional[int] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), 
"""##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def _a ( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = ["""的""", """人""", """有"""] SCREAMING_SNAKE_CASE__ : Dict = """""".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = True SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class.from_pretrained(_a , **_a ) SCREAMING_SNAKE_CASE__ : str = self.rust_tokenizer_class.from_pretrained(_a , **_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer_p.encode(_a , add_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer_r.encode(_a , add_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer_r.convert_ids_to_tokens(_a ) SCREAMING_SNAKE_CASE__ : Any = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Tuple = self.rust_tokenizer_class.from_pretrained(_a , **_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class.from_pretrained(_a , **_a ) SCREAMING_SNAKE_CASE__ : str = tokenizer_r.encode(_a , add_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer_p.encode(_a , add_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer_r.convert_ids_to_tokens(_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". SCREAMING_SNAKE_CASE__ : Tuple = [ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) @slow def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode("""你好""" , add_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode("""你是谁""" , add_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_a ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.get_tokenizers(do_lower_case=_a ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE__ : Any = """你好,你是谁""" SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.tokenize(_a ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.convert_tokens_to_ids(_a ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.convert_tokens_to_shape_ids(_a ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_tokens_to_pronunciation_ids(_a ) SCREAMING_SNAKE_CASE__ : int = tokenizer.prepare_for_model( _a , _a , _a , add_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode_plus(_a , add_special_tokens=_a ) self.assertEqual(_a , _a )
707
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a :Any = { "configuration_roberta_prelayernorm": [ "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :Union[str, Any] = [ "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", "RobertaPreLayerNormForQuestionAnswering", "RobertaPreLayerNormForSequenceClassification", "RobertaPreLayerNormForTokenClassification", "RobertaPreLayerNormModel", "RobertaPreLayerNormPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :Optional[Any] = [ "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormMainLayer", "TFRobertaPreLayerNormModel", "TFRobertaPreLayerNormPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a :List[Any] = [ "FlaxRobertaPreLayerNormForCausalLM", "FlaxRobertaPreLayerNormForMaskedLM", "FlaxRobertaPreLayerNormForMultipleChoice", "FlaxRobertaPreLayerNormForQuestionAnswering", "FlaxRobertaPreLayerNormForSequenceClassification", "FlaxRobertaPreLayerNormForTokenClassification", "FlaxRobertaPreLayerNormModel", "FlaxRobertaPreLayerNormPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, 
FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys a :Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
12
0
"""simple docstring""" a :Union[str, Any] = { "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } a :Union[str, Any] = {value: key for key, value in encode_dict.items()} def _lowercase ( __lowerCAmelCase ) -> str: SCREAMING_SNAKE_CASE__ : Union[str, Any] = """""" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("""encode() accepts only letters of the alphabet and spaces""" ) return encoded def _lowercase ( __lowerCAmelCase ) -> str: if set(__lowerCAmelCase ) - {"A", "B", " "} != set(): raise Exception("""decode() accepts only 'A', 'B' and spaces""" ) SCREAMING_SNAKE_CASE__ : Tuple = """""" for word in coded.split(): while len(__lowerCAmelCase ) != 0: decoded += decode_dict[word[:5]] SCREAMING_SNAKE_CASE__ : str = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
708
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class __a (unittest.TestCase): '''simple docstring''' def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ : Dict = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] SCREAMING_SNAKE_CASE__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073], """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711], } SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_a , _a ) def _a ( self , **_a ) -> List[Any]: """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def _a ( self , **_a ) -> List[Any]: """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def _a ( self , **_a ) -> Any: """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_a ) def _a ( self ) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] SCREAMING_SNAKE_CASE__ : Optional[int] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : str = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : str = self.get_image_processor() SCREAMING_SNAKE_CASE__ : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a ) processor_slow.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_a ) SCREAMING_SNAKE_CASE__ : int = AlignProcessor(tokenizer=_a , image_processor=_a ) processor_fast.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Any = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _a ) self.assertIsInstance(processor_fast.tokenizer , _a ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , _a ) self.assertIsInstance(processor_fast.image_processor , _a ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) SCREAMING_SNAKE_CASE__ : Dict = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) SCREAMING_SNAKE_CASE__ : Dict = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def _a ( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE__ : str = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : List[str] = AlignProcessor(tokenizer=_a , image_processor=_a ) SCREAMING_SNAKE_CASE__ : Any = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(_a , return_tensors="""np""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(images=_a , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.get_image_processor() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Any = AlignProcessor(tokenizer=_a , image_processor=_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = """lower newer""" SCREAMING_SNAKE_CASE__ : Optional[Any] = processor(text=_a ) SCREAMING_SNAKE_CASE__ : Any = tokenizer(_a , padding="""max_length""" , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.get_image_processor() SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Union[str, Any] = AlignProcessor(tokenizer=_a , image_processor=_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """lower newer""" SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ : Any = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(_a ): processor() def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.get_image_processor() SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a ) SCREAMING_SNAKE_CASE__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE__ : List[Any] = processor.batch_decode(_a ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor() SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Dict = 
AlignProcessor(tokenizer=_a , image_processor=_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = """lower newer""" SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_image_inputs() SCREAMING_SNAKE_CASE__ : List[str] = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
12
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a :Any = logging.get_logger(__name__) a :str = { "edbeeching/decision-transformer-gym-hopper-medium": ( "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[Any] = """decision_transformer""" _SCREAMING_SNAKE_CASE :List[str] = ["""past_key_values"""] _SCREAMING_SNAKE_CASE :str = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , _a=17 , _a=4 , _a=128 , _a=4_096 , _a=True , _a=1 , _a=1_024 , _a=3 , _a=1 , _a=None , _a="relu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.02 , _a=True , _a=True , _a=50_256 , _a=50_256 , _a=False , _a=False , **_a , ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dim SCREAMING_SNAKE_CASE__ : Dict = act_dim SCREAMING_SNAKE_CASE__ : Dict = hidden_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_ep_len SCREAMING_SNAKE_CASE__ : Tuple = action_tanh SCREAMING_SNAKE_CASE__ : List[str] = vocab_size SCREAMING_SNAKE_CASE__ : Optional[Any] = n_positions SCREAMING_SNAKE_CASE__ : Optional[int] = n_layer SCREAMING_SNAKE_CASE__ : str = n_head SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_inner SCREAMING_SNAKE_CASE__ : List[Any] = activation_function SCREAMING_SNAKE_CASE__ : Optional[Any] = resid_pdrop SCREAMING_SNAKE_CASE__ : List[str] = embd_pdrop SCREAMING_SNAKE_CASE__ : Union[str, Any] = attn_pdrop SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_epsilon SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_attn_weights SCREAMING_SNAKE_CASE__ : Tuple = use_cache SCREAMING_SNAKE_CASE__ : Union[str, Any] = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE__ : int = reorder_and_upcast_attn SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id SCREAMING_SNAKE_CASE__ : Dict = eos_token_id super().__init__(bos_token_id=_a , eos_token_id=_a , **_a )
709
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a :Optional[Any] = logging.get_logger(__name__) a :Union[str, Any] = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[Any] = """t5""" _SCREAMING_SNAKE_CASE :List[str] = ["""past_key_values"""] _SCREAMING_SNAKE_CASE :Any = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , _a=32_128 , _a=512 , _a=64 , _a=2_048 , _a=6 , _a=None , _a=8 , _a=32 , _a=128 , _a=0.1 , _a=1E-6 , _a=1.0 , _a="relu" , _a=True , _a=True , _a=0 , _a=1 , **_a , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = d_model SCREAMING_SNAKE_CASE__ : int = d_kv SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_ff SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_layers SCREAMING_SNAKE_CASE__ : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry SCREAMING_SNAKE_CASE__ : Tuple = num_heads SCREAMING_SNAKE_CASE__ : Dict = relative_attention_num_buckets SCREAMING_SNAKE_CASE__ : str = relative_attention_max_distance SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_factor SCREAMING_SNAKE_CASE__ : Tuple = feed_forward_proj SCREAMING_SNAKE_CASE__ : str = use_cache SCREAMING_SNAKE_CASE__ : List[str] = self.feed_forward_proj.split("""-""" ) SCREAMING_SNAKE_CASE__ : Dict = act_info[-1] SCREAMING_SNAKE_CASE__ : str = act_info[0] == """gated""" if len(_a ) > 1 and act_info[0] != "gated" or len(_a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """ """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": SCREAMING_SNAKE_CASE__ : List[Any] = """gelu_new""" super().__init__( pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , **_a , ) class __a (UpperCamelCase_): '''simple docstring''' @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = { """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: SCREAMING_SNAKE_CASE__ : Tuple = """past_encoder_sequence + sequence""" SCREAMING_SNAKE_CASE__ : Optional[int] = {0: """batch"""} SCREAMING_SNAKE_CASE__ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: SCREAMING_SNAKE_CASE__ : str = {0: """batch""", 1: """decoder_sequence"""} SCREAMING_SNAKE_CASE__ : Dict = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(_a , direction="""inputs""" ) return common_inputs @property def _a ( self ) -> int: """simple docstring""" return 13
12
0
"""simple docstring""" def _lowercase ( __lowerCAmelCase ) -> int: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError("""Input value must be a 'int' type""" ) return bin(__lowerCAmelCase ).count("""1""" ) if __name__ == "__main__": import doctest doctest.testmod()
710
"""simple docstring""" from __future__ import annotations import time import numpy as np a :Optional[Any] = [8, 5, 9, 7] a :List[Any] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] a :int = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class __a : '''simple docstring''' def __init__( self , _a , _a , _a , ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = claim_vector SCREAMING_SNAKE_CASE__ : Any = allocated_resources_table SCREAMING_SNAKE_CASE__ : Any = maximum_claim_table def _a ( self ) -> list[int]: """simple docstring""" return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def _a ( self ) -> list[int]: """simple docstring""" return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def _a ( self ) -> list[list[int]]: """simple docstring""" return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(_a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def _a ( self ) -> dict[int, list[int]]: """simple docstring""" return {self.__need().index(_a ): i for i in self.__need()} def _a ( self , **_a ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.__need() SCREAMING_SNAKE_CASE__ : Any = self.__allocated_resources_table SCREAMING_SNAKE_CASE__ : Dict = self.__available_resources() SCREAMING_SNAKE_CASE__ : Dict = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 50 + """\n""" ) while need_list: SCREAMING_SNAKE_CASE__ : List[str] = False for each_need in need_list: SCREAMING_SNAKE_CASE__ : Dict = True for index, need in enumerate(_a ): if need > available_resources[index]: SCREAMING_SNAKE_CASE__ : Optional[int] = False break if execution: SCREAMING_SNAKE_CASE__ : Any = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: SCREAMING_SNAKE_CASE__ : Tuple = original_need_index print(f'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(_a ) # update available/freed resources stack SCREAMING_SNAKE_CASE__ : Dict = np.array(_a ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(_a ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def _a ( self ) -> Any: """simple docstring""" print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( f'''P{self.__allocated_resources_table.index(_a ) + 1}''' + """ """.join(f'''{it:>8}''' for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( f'''P{self.__maximum_claim_table.index(_a ) + 1}''' + """ """.join(f'''{it:>8}''' for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(_a ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(_a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean a :Dict = 0 a :Any = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] a :Optional[int] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right a :int = tuple[int, int] class __a : '''simple docstring''' def __init__( self , _a , _a , _a , _a , _a , _a , ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = pos_x SCREAMING_SNAKE_CASE__ : int = pos_y SCREAMING_SNAKE_CASE__ : Any = (pos_y, pos_x) SCREAMING_SNAKE_CASE__ : List[Any] = goal_x SCREAMING_SNAKE_CASE__ : int = goal_y SCREAMING_SNAKE_CASE__ : str = g_cost SCREAMING_SNAKE_CASE__ : Optional[int] = parent SCREAMING_SNAKE_CASE__ : Dict = self.calculate_heuristic() SCREAMING_SNAKE_CASE__ : Optional[int] = self.g_cost + self.h_cost def _a ( self ) -> float: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.pos_x - self.goal_x SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(_a ) + abs(_a ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , _a ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class __a : '''simple docstring''' def __init__( self , _a , _a ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _a ) SCREAMING_SNAKE_CASE__ : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _a ) SCREAMING_SNAKE_CASE__ : List[str] = [self.start] SCREAMING_SNAKE_CASE__ : list[Node] = [] SCREAMING_SNAKE_CASE__ : Any = False def _a ( self ) -> list[TPosition]: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(_a ) self.closed_nodes.append(_a ) SCREAMING_SNAKE_CASE__ : Dict = self.get_successors(_a ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_a ) else: # retrieve the best current path SCREAMING_SNAKE_CASE__ : List[Any] = self.open_nodes.pop(self.open_nodes.index(_a ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_a ) else: self.open_nodes.append(_a ) return [self.start.pos] def _a ( self , _a ) -> list[Node]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = [] for action in delta: SCREAMING_SNAKE_CASE__ : Optional[Any] = parent.pos_x + action[1] SCREAMING_SNAKE_CASE__ : str = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _a , _a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _a , ) ) return successors def _a ( self , _a ) -> list[TPosition]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = node SCREAMING_SNAKE_CASE__ : Tuple = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) SCREAMING_SNAKE_CASE__ : List[Any] = current_node.parent path.reverse() return path class __a : '''simple docstring''' def __init__( self , _a , _a ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = AStar(_a , _a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = AStar(_a , _a ) SCREAMING_SNAKE_CASE__ : List[str] 
= False def _a ( self ) -> list[TPosition]: """simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() SCREAMING_SNAKE_CASE__ : Any = self.fwd_astar.open_nodes.pop(0 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( _a , _a ) self.fwd_astar.closed_nodes.append(_a ) self.bwd_astar.closed_nodes.append(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = current_bwd_node SCREAMING_SNAKE_CASE__ : Tuple = current_fwd_node SCREAMING_SNAKE_CASE__ : Tuple = { self.fwd_astar: self.fwd_astar.get_successors(_a ), self.bwd_astar: self.bwd_astar.get_successors(_a ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(_a ) else: # retrieve the best current path SCREAMING_SNAKE_CASE__ : List[str] = astar.open_nodes.pop( astar.open_nodes.index(_a ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(_a ) else: astar.open_nodes.append(_a ) return [self.fwd_astar.start.pos] def _a ( self , _a , _a ) -> list[TPosition]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.fwd_astar.retrace_path(_a ) SCREAMING_SNAKE_CASE__ : str = self.bwd_astar.retrace_path(_a ) bwd_path.pop() bwd_path.reverse() SCREAMING_SNAKE_CASE__ : Optional[Any] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] a :Optional[Any] = (0, 0) a :List[Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) a :str = time.time() a :Any = AStar(init, goal) a :Optional[Any] = a_star.search() a :Union[str, Any] = time.time() - start_time print(f'AStar execution time = {end_time:f} seconds') a :Tuple = time.time() a :Any = BidirectionalAStar(init, goal) a :int = time.time() - bd_start_time print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
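Both searchers read the module-level grid and HEURISTIC globals, so the cheapest way to experiment is to flip those before constructing a searcher. A minimal sketch, assuming the file above is importable as bidirectional_a_star (the module name is an assumption):

import bidirectional_a_star as bas  # hypothetical module name for the file above

bas.HEURISTIC = 1  # switch from euclidean to manhattan distance
finder = bas.BidirectionalAStar((0, 0), (len(bas.grid) - 1, len(bas.grid[0]) - 1))
print(finder.search())  # list of (y, x) positions from start to goal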
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: a :List[Any] = None a :Optional[int] = logging.get_logger(__name__) a :Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} a :Optional[int] = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", }, "tokenizer_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json", }, } a :Dict = { "xlnet-base-cased": None, "xlnet-large-cased": None, } a :int = "▁" # Segments (not really needed) a :Dict = 0 a :Optional[int] = 1 a :Tuple = 2 a :List[str] = 3 a :Optional[Any] = 4 class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Tuple = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE :str = """left""" _SCREAMING_SNAKE_CASE :Optional[Any] = XLNetTokenizer def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3 SCREAMING_SNAKE_CASE__ : Optional[int] = do_lower_case SCREAMING_SNAKE_CASE__ : List[str] = remove_space SCREAMING_SNAKE_CASE__ : int = keep_accents SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_file SCREAMING_SNAKE_CASE__ : Tuple = False if not self.vocab_file else True def _a ( self , _a , _a = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : Tuple = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _a ( self , _a , _a = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _a ( self , _a , _a = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_a ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE__ : Tuple = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != 
os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
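The class above is the tokenizers-backed XLNet tokenizer from transformers with its names anonymized; under that assumption, typical usage looks like the sketch below. Note that XLNet appends the <sep> and <cls> tokens at the end of the sequence (see build_inputs_with_special_tokens above) and pads on the left.

# a sketch assuming this class is transformers' XLNetTokenizerFast
from transformers import XLNetTokenizerFast

tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
enc = tok("Hello world")
print(enc["input_ids"])  # ids end with the <sep> and <cls> token ids
print(tok.padding_side)  # "left", matching the class attribute above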
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __a : '''simple docstring''' def __init__( self , _a , _a=14 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=4 , _a=4 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=0.02 , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = parent SCREAMING_SNAKE_CASE__ : List[str] = batch_size SCREAMING_SNAKE_CASE__ : Tuple = seq_length SCREAMING_SNAKE_CASE__ : Dict = is_training SCREAMING_SNAKE_CASE__ : Tuple = use_input_mask SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE__ : Any = use_labels SCREAMING_SNAKE_CASE__ : int = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = hidden_size SCREAMING_SNAKE_CASE__ : List[Any] = rotary_dim SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Any = initializer_range SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Dict = vocab_size - 1 SCREAMING_SNAKE_CASE__ : str = vocab_size - 1 SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size - 1 def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def _a ( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : int = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def _a ( self , _a , _a , _a , _a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 20 SCREAMING_SNAKE_CASE__ : int = model_class_name(_a ) SCREAMING_SNAKE_CASE__ : List[Any] = model.init_cache(input_ids.shape[0] , _a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.broadcast_to( 
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE__ : Any = model( input_ids[:, :-1] , attention_mask=_a , past_key_values=_a , position_ids=_a , ) SCREAMING_SNAKE_CASE__ : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE__ : str = model( input_ids[:, -1:] , attention_mask=_a , past_key_values=outputs_cache.past_key_values , position_ids=_a , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a ) SCREAMING_SNAKE_CASE__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def _a ( self , _a , _a , _a , _a ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 20 SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class_name(_a ) SCREAMING_SNAKE_CASE__ : List[Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) SCREAMING_SNAKE_CASE__ : Any = model.init_cache(input_ids.shape[0] , _a ) SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) SCREAMING_SNAKE_CASE__ : Dict = model( input_ids[:, :-1] , attention_mask=_a , past_key_values=_a , position_ids=_a , ) SCREAMING_SNAKE_CASE__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) SCREAMING_SNAKE_CASE__ : Optional[int] = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_a , position_ids=_a , ) SCREAMING_SNAKE_CASE__ : Dict = model(_a , attention_mask=_a ) SCREAMING_SNAKE_CASE__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) @require_flax class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[str] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () _SCREAMING_SNAKE_CASE :Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else () def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = FlaxGPTJModelTester(self ) def _a ( self ) -> Optional[Any]: """simple docstring""" for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(_a , _a , _a , _a ) def _a ( self ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( _a , _a , _a , _a ) @tooslow def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) SCREAMING_SNAKE_CASE__ : Dict = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=_a , truncation=_a ) SCREAMING_SNAKE_CASE__ : Tuple = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE__ : Optional[int] = False SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.config.eos_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] = jax.jit(model.generate ) SCREAMING_SNAKE_CASE__ : Optional[int] = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , 
pad_token_id=tokenizer.pad_token_id ).sequences SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.batch_decode(_a , skip_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : List[str] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. I'm going to""", ] self.assertListEqual(_a , _a ) @is_pt_flax_cross_test def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_a , _a ) SCREAMING_SNAKE_CASE__ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(_a , _a ) SCREAMING_SNAKE_CASE__ : List[Any] = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE__ : Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_a ): SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : Dict = 1 SCREAMING_SNAKE_CASE__ : List[str] = 0 SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : Tuple = pt_model_class(_a ).eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_a , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ : List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _a ) SCREAMING_SNAKE_CASE__ : List[str] = fx_state with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = pt_model(**_a ).to_tuple() SCREAMING_SNAKE_CASE__ : Optional[Any] = fx_model(**_a ).to_tuple() self.assertEqual(len(_a ) , len(_a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(_a , _a ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_a ) SCREAMING_SNAKE_CASE__ : str = model_class.from_pretrained(_a , from_pt=_a ) SCREAMING_SNAKE_CASE__ : List[str] = fx_model_loaded(**_a ).to_tuple() self.assertEqual( len(_a ) , len(_a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(_a , _a ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_a , _a ) SCREAMING_SNAKE_CASE__ : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class SCREAMING_SNAKE_CASE__ : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(_a , _a ) SCREAMING_SNAKE_CASE__ : int = pt_model_class(_a ).eval() SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_a , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ : List[str] = load_flax_weights_in_pytorch_model(_a , fx_model.params ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = pt_inputs["""input_ids"""].shape SCREAMING_SNAKE_CASE__ : Tuple = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_a ): SCREAMING_SNAKE_CASE__ : Dict = 0 
SCREAMING_SNAKE_CASE__ : Tuple = 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 SCREAMING_SNAKE_CASE__ : int = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Tuple = pt_model(**_a ).to_tuple() SCREAMING_SNAKE_CASE__ : Any = fx_model(**_a ).to_tuple() self.assertEqual(len(_a ) , len(_a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(_a , _a ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = pt_model_class.from_pretrained(_a , from_flax=_a ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] = pt_model_loaded(**_a ).to_tuple() self.assertEqual( len(_a ) , len(_a ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(_a , _a ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def _a ( self ) -> Tuple: """simple docstring""" for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[str] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(_a )
"""simple docstring""" def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> bool: SCREAMING_SNAKE_CASE__ : Optional[Any] = len(__lowerCAmelCase ) + 1 SCREAMING_SNAKE_CASE__ : int = len(__lowerCAmelCase ) + 1 # dp is a 2d matrix where dp[i][j] denotes whether prefix string of # length i of input_string matches with prefix string of length j of # given pattern. # "dp" stands for dynamic programming. SCREAMING_SNAKE_CASE__ : Dict = [[0 for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )] # since string of zero length match pattern of zero length SCREAMING_SNAKE_CASE__ : Dict = 1 # since pattern of zero length will never match with string of non-zero length for i in range(1 , __lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 # since string of zero length will match with pattern where there # is at least one * alternatively for j in range(1 , __lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : int = dp[0][j - 2] if pattern[j - 1] == """*""" else 0 # now using bottom-up approach to find for all remaining lengths for i in range(1 , __lowerCAmelCase ): for j in range(1 , __lowerCAmelCase ): if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".": SCREAMING_SNAKE_CASE__ : Any = dp[i - 1][j - 1] elif pattern[j - 1] == "*": if dp[i][j - 2] == 1: SCREAMING_SNAKE_CASE__ : List[str] = 1 elif pattern[j - 2] in (input_string[i - 1], "."): SCREAMING_SNAKE_CASE__ : List[Any] = dp[i - 1][j] else: SCREAMING_SNAKE_CASE__ : Optional[int] = 0 else: SCREAMING_SNAKE_CASE__ : Dict = 0 return bool(dp[-1][-1] ) if __name__ == "__main__": import doctest doctest.testmod() # inputing the strings # input_string = input("input a string :") # pattern = input("input a pattern :") a :Any = "aab" a :Optional[Any] = "c*a*b" # using function to check whether given string matches the given pattern if match_pattern(input_string, pattern): print(f'{input_string} matches the given pattern {pattern}') else: print(f'{input_string} does not match with the given pattern {pattern}')
"""simple docstring""" from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class __a (UpperCamelCase_ , UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Optional[int] = """pixel_values""" _SCREAMING_SNAKE_CASE :Optional[Any] = False _SCREAMING_SNAKE_CASE :str = TimmBackboneConfig def __init__( self , _a , **_a ) -> Optional[Any]: """simple docstring""" requires_backends(self , """timm""" ) super().__init__(_a ) SCREAMING_SNAKE_CASE__ : List[str] = config if config.backbone is None: raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" ) if config.backbone not in timm.list_models(): raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' ) if hasattr(_a , """out_features""" ) and config.out_features is not None: raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" ) SCREAMING_SNAKE_CASE__ : Any = getattr(_a , """use_pretrained_backbone""" , _a ) if pretrained is None: raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" ) # We just take the final layer by default. This matches the default for the transformers models. SCREAMING_SNAKE_CASE__ : List[str] = config.out_indices if getattr(_a , """out_indices""" , _a ) is not None else (-1,) SCREAMING_SNAKE_CASE__ : Tuple = timm.create_model( config.backbone , pretrained=_a , features_only=config.features_only , in_chans=config.num_channels , out_indices=_a , **_a , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
SCREAMING_SNAKE_CASE__ : Dict = self._backbone.return_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = {layer["""module"""]: str(_a ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(_a ) @classmethod def _a ( cls , _a , *_a , **_a ) -> Any: """simple docstring""" requires_backends(cls , ["""vision""", """timm"""] ) from ...models.timm_backbone import TimmBackboneConfig SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("""config""" , TimmBackboneConfig() ) SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("""use_timm_backbone""" , _a ) if not use_timm: raise ValueError("""use_timm_backbone must be True for timm backbones""" ) SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("""num_channels""" , config.num_channels ) SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop("""features_only""" , config.features_only ) SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone ) SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("""out_indices""" , config.out_indices ) SCREAMING_SNAKE_CASE__ : List[str] = TimmBackboneConfig( backbone=_a , num_channels=_a , features_only=_a , use_pretrained_backbone=_a , out_indices=_a , ) return super()._from_config(_a , **_a ) def _a ( self , _a ) -> List[Any]: """simple docstring""" pass def _a ( self , _a , _a=None , _a=None , _a=None , **_a ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("""Cannot output attentions for timm backbones at the moment""" ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone SCREAMING_SNAKE_CASE__ : List[Any] = self._all_layers SCREAMING_SNAKE_CASE__ : Dict = self._backbone(_a , **_a ) SCREAMING_SNAKE_CASE__ : Tuple = self._return_layers SCREAMING_SNAKE_CASE__ : Optional[Any] = tuple(hidden_states[i] for i in self.out_indices ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._backbone(_a , **_a ) SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : str = tuple(_a ) SCREAMING_SNAKE_CASE__ : Dict = tuple(_a ) if hidden_states is not None else None if not return_dict: SCREAMING_SNAKE_CASE__ : Any = (feature_maps,) if output_hidden_states: SCREAMING_SNAKE_CASE__ : Any = output + (hidden_states,) return output return BackboneOutput(feature_maps=_a , hidden_states=_a , attentions=_a )
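This wrapper is exercised through transformers' backbone API; assuming the class above is TimmBackbone, a backbone can be created from a plain timm model name and queried for multi-scale feature maps (the model name and indices below are illustrative):

# a sketch assuming this class backs transformers' AutoBackbone timm path
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
print([fm.shape for fm in outputs.feature_maps])  # one tensor per requested stage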
"""simple docstring""" from math import sqrt def _lowercase ( __lowerCAmelCase ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowercase ( __lowerCAmelCase = 1_0001 ) -> int: SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Tuple = 1 while count != nth and number < 3: number += 1 if is_prime(__lowerCAmelCase ): count += 1 while count != nth: number += 2 if is_prime(__lowerCAmelCase ): count += 1 return number if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" import os import sys import transformers a :Union[str, Any] = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
"""simple docstring""" class __a : '''simple docstring''' def __init__( self , _a , _a , _a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = name SCREAMING_SNAKE_CASE__ : Optional[Any] = value SCREAMING_SNAKE_CASE__ : List[Any] = weight def __repr__( self ) -> List[Any]: """simple docstring""" return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def _a ( self ) -> Dict: """simple docstring""" return self.value def _a ( self ) -> int: """simple docstring""" return self.name def _a ( self ) -> Optional[Any]: """simple docstring""" return self.weight def _a ( self ) -> Dict: """simple docstring""" return self.value / self.weight def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict: SCREAMING_SNAKE_CASE__ : Any = [] for i in range(len(__lowerCAmelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] = sorted(__lowerCAmelCase , key=__lowerCAmelCase , reverse=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = [] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0, 0.0 for i in range(len(__lowerCAmelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _lowercase ( ) -> List[str]: pass if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _lowercase ( __lowerCAmelCase = "isbn/0140328726" ) -> dict: SCREAMING_SNAKE_CASE__ : Optional[int] = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes if new_olid.count("""/""" ) != 1: SCREAMING_SNAKE_CASE__ : Optional[int] = F'''{olid} is not a valid Open Library olid''' raise ValueError(__lowerCAmelCase ) return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json() def _lowercase ( __lowerCAmelCase ) -> dict: SCREAMING_SNAKE_CASE__ : Any = { """title""": """Title""", """publish_date""": """Publish date""", """authors""": """Authors""", """number_of_pages""": """Number of pages:""", """first_sentence""": """First sentence""", """isbn_10""": """ISBN (10)""", """isbn_13""": """ISBN (13)""", } SCREAMING_SNAKE_CASE__ : Optional[int] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} SCREAMING_SNAKE_CASE__ : Optional[Any] = [ get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""] ] SCREAMING_SNAKE_CASE__ : Dict = data["""First sentence"""]["""value"""] for key, value in data.items(): if isinstance(__lowerCAmelCase , __lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = """, """.join(__lowerCAmelCase ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: a :Any = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.') continue print(f'\nSearching Open Library for ISBN: {isbn}...\n') try: a :Optional[Any] = summarize_book(get_openlibrary_data(f'isbn/{isbn}')) print("\n".join(f'{key}: {value}' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'Sorry, there are no results for ISBN: {isbn}.')
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: a :Optional[int] = None a :Optional[Any] = logging.get_logger(__name__) a :Optional[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} a :Union[str, Any] = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json" ), }, } a :Any = { "facebook/nllb-large-en-ro": 1_024, "facebook/nllb-200-distilled-600M": 1_024, } # fmt: off a :Tuple = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Optional[Any] = VOCAB_FILES_NAMES 
_SCREAMING_SNAKE_CASE :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE :str = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE :int = ["""input_ids""", """attention_mask"""] _SCREAMING_SNAKE_CASE :Tuple = NllbTokenizer _SCREAMING_SNAKE_CASE :List[int] = [] _SCREAMING_SNAKE_CASE :List[int] = [] def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , _a=False , **_a , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token SCREAMING_SNAKE_CASE__ : Optional[int] = legacy_behaviour super().__init__( vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , legacy_behaviour=_a , **_a , ) SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_file SCREAMING_SNAKE_CASE__ : str = False if not self.vocab_file else True SCREAMING_SNAKE_CASE__ : Dict = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) SCREAMING_SNAKE_CASE__ : List[str] = { lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES } SCREAMING_SNAKE_CASE__ : Dict = src_lang if src_lang is not None else """eng_Latn""" SCREAMING_SNAKE_CASE__ : List[str] = self.convert_tokens_to_ids(self._src_lang ) SCREAMING_SNAKE_CASE__ : Dict = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _a ( self ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def _a ( self , _a ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _a ( self , _a , _a = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _a ( self , _a , _a = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self , _a , _a , _a , _a , **_a ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) SCREAMING_SNAKE_CASE__ : Dict = src_lang SCREAMING_SNAKE_CASE__ : Dict = self(_a , add_special_tokens=_a , return_tensors=_a , **_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.convert_tokens_to_ids(_a ) SCREAMING_SNAKE_CASE__ : List[Any] = tgt_lang_id return inputs def _a ( self , _a , _a = "eng_Latn" , _a = None , _a = "fra_Latn" , **_a , ) -> BatchEncoding: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = src_lang SCREAMING_SNAKE_CASE__ : Dict = tgt_lang return super().prepare_seqaseq_batch(_a , _a , **_a ) def _a ( self ) -> Optional[Any]: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def _a ( self 
) -> str: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _a ( self , _a ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.convert_tokens_to_ids(_a ) if self.legacy_behaviour: SCREAMING_SNAKE_CASE__ : str = [] SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE__ : Dict = [self.cur_lang_code] SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE__ : int = self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE__ : int = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _a ( self , _a ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.convert_tokens_to_ids(_a ) if self.legacy_behaviour: SCREAMING_SNAKE_CASE__ : List[Any] = [] SCREAMING_SNAKE_CASE__ : Optional[int] = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cur_lang_code] SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.eos_token_id] SCREAMING_SNAKE_CASE__ : Any = self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE__ : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _a ( self , _a , _a = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_a ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return SCREAMING_SNAKE_CASE__ : Dict = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
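Assuming the class above is transformers' NllbTokenizerFast, translation pre-processing hinges on the src_lang/tgt_lang codes from the FAIRSEQ_LANGUAGE_CODES list; with the default legacy_behaviour=False, the source language-code token is prefixed and </s> appended:

# a sketch assuming this class is transformers' NllbTokenizerFast
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tok("Hello world", return_tensors="pt")
# batch["input_ids"] starts with the eng_Latn code token and ends with </s>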
"""simple docstring""" from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter a :str = "Create a default config file for Accelerate with only a few flags set." def _lowercase ( __lowerCAmelCase="no" , __lowerCAmelCase = default_json_config_file , __lowerCAmelCase = False ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int = Path(__lowerCAmelCase ) path.parent.mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False SCREAMING_SNAKE_CASE__ : Dict = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """compute_environment""": """LOCAL_MACHINE""", """mixed_precision""": mixed_precision, } if torch.cuda.is_available(): SCREAMING_SNAKE_CASE__ : str = torch.cuda.device_count() SCREAMING_SNAKE_CASE__ : str = num_gpus SCREAMING_SNAKE_CASE__ : str = False if num_gpus > 1: SCREAMING_SNAKE_CASE__ : str = """MULTI_GPU""" else: SCREAMING_SNAKE_CASE__ : int = """NO""" elif is_xpu_available() and use_xpu: SCREAMING_SNAKE_CASE__ : Dict = torch.xpu.device_count() SCREAMING_SNAKE_CASE__ : Optional[int] = num_xpus SCREAMING_SNAKE_CASE__ : Optional[int] = False if num_xpus > 1: SCREAMING_SNAKE_CASE__ : List[Any] = """MULTI_XPU""" else: SCREAMING_SNAKE_CASE__ : Dict = """NO""" elif is_npu_available(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.npu.device_count() SCREAMING_SNAKE_CASE__ : Any = num_npus SCREAMING_SNAKE_CASE__ : Optional[int] = False if num_npus > 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = """MULTI_NPU""" else: SCREAMING_SNAKE_CASE__ : int = """NO""" else: SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : int = True SCREAMING_SNAKE_CASE__ : Any = 1 SCREAMING_SNAKE_CASE__ : Optional[Any] = """NO""" SCREAMING_SNAKE_CASE__ : Optional[int] = ClusterConfig(**__lowerCAmelCase ) config.to_json_file(__lowerCAmelCase ) return path def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Any = parser.add_parser("""default""" , parents=__lowerCAmelCase , help=__lowerCAmelCase , formatter_class=__lowerCAmelCase ) parser.add_argument( """--config_file""" , default=__lowerCAmelCase , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , dest="""save_location""" , ) parser.add_argument( """--mixed_precision""" , choices=["""no""", """fp16""", """bf16"""] , type=__lowerCAmelCase , help="""Whether or not to use mixed precision training. """ """Choose between FP16 and BF16 (bfloat16) training. 
""" """BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""" , default="""no""" , ) parser.set_defaults(func=__lowerCAmelCase ) return parser def _lowercase ( __lowerCAmelCase ) -> str: SCREAMING_SNAKE_CASE__ : str = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
"""simple docstring""" # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Any: # Format the message. if name is None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = None else: SCREAMING_SNAKE_CASE__ : str = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}""" SCREAMING_SNAKE_CASE__ : Dict = fmt.format(__lowerCAmelCase ) # Print and recurse (if needed). if isinstance(__lowerCAmelCase , __lowerCAmelCase ): if msg is not None: print(__lowerCAmelCase ) for k in val.keys(): recursive_print(__lowerCAmelCase , val[k] , spaces + 2 ) elif isinstance(__lowerCAmelCase , torch.Tensor ): print(__lowerCAmelCase , """:""" , val.size() ) else: print(__lowerCAmelCase , """:""" , __lowerCAmelCase ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]: # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. 
SCREAMING_SNAKE_CASE__ : Tuple = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] SCREAMING_SNAKE_CASE__ : int = (num_heads, hidden_size, num_splits) + input_shape[1:] SCREAMING_SNAKE_CASE__ : List[str] = param.view(*__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = param.transpose(0 , 2 ) SCREAMING_SNAKE_CASE__ : List[Any] = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] SCREAMING_SNAKE_CASE__ : List[str] = (num_heads, num_splits, hidden_size) + input_shape[1:] SCREAMING_SNAKE_CASE__ : Dict = param.view(*__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : int = param.transpose(0 , 1 ).contiguous() SCREAMING_SNAKE_CASE__ : Any = param.view(*__lowerCAmelCase ) return param def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: # The converted output model. SCREAMING_SNAKE_CASE__ : List[str] = {} # old versions did not store training args SCREAMING_SNAKE_CASE__ : List[str] = input_state_dict.get("""args""" , __lowerCAmelCase ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.padded_vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] = ds_args.max_position_embeddings SCREAMING_SNAKE_CASE__ : List[Any] = ds_args.hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = ds_args.num_layers SCREAMING_SNAKE_CASE__ : Dict = ds_args.num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] = ds_args.ffn_hidden_size # pprint(config) # The number of heads. SCREAMING_SNAKE_CASE__ : List[str] = config.n_head # The hidden_size per head. SCREAMING_SNAKE_CASE__ : str = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_state_dict["""checkpoint_version"""] else: SCREAMING_SNAKE_CASE__ : Tuple = 0.0 # The model. SCREAMING_SNAKE_CASE__ : Any = input_state_dict["""model"""] # The language model. SCREAMING_SNAKE_CASE__ : Any = model["""language_model"""] # The embeddings. SCREAMING_SNAKE_CASE__ : str = lm["""embedding"""] # The word embeddings. SCREAMING_SNAKE_CASE__ : int = embeddings["""word_embeddings"""]["""weight"""] # Truncate the embedding table to vocab_size rows. SCREAMING_SNAKE_CASE__ : Any = word_embeddings[: config.vocab_size, :] SCREAMING_SNAKE_CASE__ : Optional[int] = word_embeddings # The position embeddings. SCREAMING_SNAKE_CASE__ : Any = embeddings["""position_embeddings"""]["""weight"""] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] SCREAMING_SNAKE_CASE__ : Tuple = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' ) # Store the position embeddings. SCREAMING_SNAKE_CASE__ : List[Any] = pos_embeddings # The transformer. SCREAMING_SNAKE_CASE__ : Union[str, Any] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""] # The regex to extract layer names. SCREAMING_SNAKE_CASE__ : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" ) # The simple map of names for "automated" rules. 
SCREAMING_SNAKE_CASE__ : Optional[int] = { """attention.dense""": """.attn.c_proj.""", """self_attention.dense""": """.attn.c_proj.""", """mlp.dense_h_to_4h""": """.mlp.c_fc.""", """mlp.dense_4h_to_h""": """.mlp.c_proj.""", } # Extract the layers. for key, val in transformer.items(): # Match the name. SCREAMING_SNAKE_CASE__ : str = layer_re.match(__lowerCAmelCase ) # Stop if that's not a layer if m is None: break # The index of the layer. SCREAMING_SNAKE_CASE__ : Dict = int(m.group(1 ) ) # The name of the operation. SCREAMING_SNAKE_CASE__ : Optional[Any] = m.group(2 ) # Is it a weight or a bias? SCREAMING_SNAKE_CASE__ : str = m.group(3 ) # The name of the layer. SCREAMING_SNAKE_CASE__ : List[Any] = F'''transformer.h.{layer_idx}''' # For layernorm(s), simply store the layer norm. if op_name.endswith("""layernorm""" ): SCREAMING_SNAKE_CASE__ : Dict = """ln_1""" if op_name.startswith("""input""" ) else """ln_2""" SCREAMING_SNAKE_CASE__ : List[Any] = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. SCREAMING_SNAKE_CASE__ : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , __lowerCAmelCase , __lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = causal_mask # Insert a "dummy" tensor for masked_bias. SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(-1E4 , dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : List[str] = masked_bias SCREAMING_SNAKE_CASE__ : List[str] = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. SCREAMING_SNAKE_CASE__ : str = out_val.transpose(0 , 1 ).contiguous() # Store. SCREAMING_SNAKE_CASE__ : Dict = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": SCREAMING_SNAKE_CASE__ : Any = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase ) # Store. No change of shape. SCREAMING_SNAKE_CASE__ : str = out_val # Transpose the weights. elif weight_or_bias == "weight": SCREAMING_SNAKE_CASE__ : str = megatron_to_transformers[op_name] SCREAMING_SNAKE_CASE__ : int = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": SCREAMING_SNAKE_CASE__ : int = megatron_to_transformers[op_name] SCREAMING_SNAKE_CASE__ : Dict = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. SCREAMING_SNAKE_CASE__ : Union[str, Any] = transformer["""final_layernorm.weight"""] SCREAMING_SNAKE_CASE__ : str = transformer["""final_layernorm.bias"""] # For LM head, transformers' wants the matrix to weight embeddings. SCREAMING_SNAKE_CASE__ : Tuple = word_embeddings # It should be done! return output_state_dict def _lowercase ( ) -> List[Any]: # Create the argument parser. SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" ) parser.add_argument( """path_to_checkpoint""" , type=__lowerCAmelCase , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , ) parser.add_argument( """--config_file""" , default="""""" , type=__lowerCAmelCase , help="""An optional config json file describing the pre-trained model.""" , ) SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args() # Extract the basename. 
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' ) if args.path_to_checkpoint.endswith(""".zip""" ): with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint: with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict: SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(__lowerCAmelCase , map_location="""cpu""" ) else: SCREAMING_SNAKE_CASE__ : str = torch.load(args.path_to_checkpoint , map_location="""cpu""" ) SCREAMING_SNAKE_CASE__ : int = input_state_dict.get("""args""" , __lowerCAmelCase ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: SCREAMING_SNAKE_CASE__ : Dict = """gelu_fast""" elif ds_args.openai_gelu: SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu_new""" else: SCREAMING_SNAKE_CASE__ : Optional[Any] = """gelu""" else: # in the very early days this used to be "gelu_new" SCREAMING_SNAKE_CASE__ : Any = """gelu_new""" # Spell out all parameters in case the defaults change. SCREAMING_SNAKE_CASE__ : Union[str, Any] = GPTaConfig( vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__lowerCAmelCase , summary_activation=__lowerCAmelCase , summary_proj_to_labels=__lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=__lowerCAmelCase , use_cache=__lowerCAmelCase , bos_token_id=5_0256 , eos_token_id=5_0256 , ) else: SCREAMING_SNAKE_CASE__ : List[Any] = GPTaConfig.from_json_file(args.config_file ) SCREAMING_SNAKE_CASE__ : Tuple = ["""GPT2LMHeadModel"""] # Convert. print("""Converting""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_megatron_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(__lowerCAmelCase , __lowerCAmelCase ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: SCREAMING_SNAKE_CASE__ : Tuple = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": SCREAMING_SNAKE_CASE__ : Any = """gpt2""" elif tokenizer_type == "PretrainedFromHF": SCREAMING_SNAKE_CASE__ : Any = ds_args.tokenizer_name_or_path else: raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''' ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = """gpt2""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = type(__lowerCAmelCase ).__name__ SCREAMING_SNAKE_CASE__ : Dict = tokenizer_class # Store the config to file. print("""Saving config""" ) config.save_pretrained(__lowerCAmelCase ) # Save tokenizer based on args print(F'''Adding {tokenizer_class} tokenizer files''' ) tokenizer.save_pretrained(__lowerCAmelCase ) # Store the state_dict to file. 
SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , """pytorch_model.bin""" ) print(F'''Saving checkpoint to "{output_checkpoint_file}"''' ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
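The parser above defines the whole CLI: a positional checkpoint path (a .zip archive or a raw .pt file) plus the optional --config_file and --print-checkpoint-structure flags; the converted config, tokenizer files, and pytorch_model.bin are all written next to the input checkpoint. Using the script path from the header comment, a typical run (the checkpoint path is illustrative) looks like:

python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure /path/to/checkpoint.zip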
12
0
"""simple docstring""" class __a : '''simple docstring''' def __init__( self , _a , _a , _a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = name SCREAMING_SNAKE_CASE__ : Optional[Any] = value SCREAMING_SNAKE_CASE__ : List[Any] = weight def __repr__( self ) -> List[Any]: """simple docstring""" return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def _a ( self ) -> Dict: """simple docstring""" return self.value def _a ( self ) -> int: """simple docstring""" return self.name def _a ( self ) -> Optional[Any]: """simple docstring""" return self.weight def _a ( self ) -> Dict: """simple docstring""" return self.value / self.weight def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict: SCREAMING_SNAKE_CASE__ : Any = [] for i in range(len(__lowerCAmelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] = sorted(__lowerCAmelCase , key=__lowerCAmelCase , reverse=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = [] SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0, 0.0 for i in range(len(__lowerCAmelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _lowercase ( ) -> List[str]: pass if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class __a (UpperCamelCase_): '''simple docstring''' def _a ( self , _a ) -> Union[str, Any]: """simple docstring""" with open(_a , encoding="""utf-8""" ) as input_file: SCREAMING_SNAKE_CASE__ : str = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = input_file.read() SCREAMING_SNAKE_CASE__ : str = regexp.search(_a ) return match def _a ( self , _a ) -> Optional[Any]: """simple docstring""" with open(_a , encoding="""utf-8""" ) as input_file: SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL ) SCREAMING_SNAKE_CASE__ : List[Any] = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` SCREAMING_SNAKE_CASE__ : Dict = regexp.finditer(_a ) SCREAMING_SNAKE_CASE__ : int = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = Path("""./datasets""" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(_a ) ): raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' ) def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path("""./datasets""" ) SCREAMING_SNAKE_CASE__ : List[str] = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_print_statements(str(_a ) ): raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a :Optional[Any] = logging.get_logger(__name__) a :Union[str, Any] = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[Any] = """t5""" _SCREAMING_SNAKE_CASE :List[str] = ["""past_key_values"""] _SCREAMING_SNAKE_CASE :Any = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , _a=32_128 , _a=512 , _a=64 , _a=2_048 , _a=6 , _a=None , _a=8 , _a=32 , _a=128 , _a=0.1 , _a=1E-6 , _a=1.0 , _a="relu" , _a=True , _a=True , _a=0 , _a=1 , **_a , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size SCREAMING_SNAKE_CASE__ : Tuple = d_model SCREAMING_SNAKE_CASE__ : int = d_kv SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_ff SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_layers SCREAMING_SNAKE_CASE__ : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry SCREAMING_SNAKE_CASE__ : Tuple = num_heads SCREAMING_SNAKE_CASE__ : Dict = relative_attention_num_buckets SCREAMING_SNAKE_CASE__ : str = relative_attention_max_distance SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_factor SCREAMING_SNAKE_CASE__ : Tuple = feed_forward_proj SCREAMING_SNAKE_CASE__ : str = use_cache SCREAMING_SNAKE_CASE__ : List[str] = self.feed_forward_proj.split("""-""" ) SCREAMING_SNAKE_CASE__ : Dict = act_info[-1] SCREAMING_SNAKE_CASE__ : str = act_info[0] == """gated""" if len(_a ) > 1 and act_info[0] != "gated" or len(_a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """ """'gated-gelu' or 'relu'""" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": SCREAMING_SNAKE_CASE__ : List[Any] = """gelu_new""" super().__init__( pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , **_a , ) class __a (UpperCamelCase_): '''simple docstring''' @property def _a ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = { """input_ids""": {0: """batch""", 1: """encoder_sequence"""}, """attention_mask""": {0: """batch""", 1: """encoder_sequence"""}, } if self.use_past: SCREAMING_SNAKE_CASE__ : Tuple = """past_encoder_sequence + sequence""" SCREAMING_SNAKE_CASE__ : Optional[int] = {0: """batch"""} SCREAMING_SNAKE_CASE__ : Tuple = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: SCREAMING_SNAKE_CASE__ : str = {0: """batch""", 1: """decoder_sequence"""} SCREAMING_SNAKE_CASE__ : Dict = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(_a , direction="""inputs""" ) return common_inputs @property def _a ( self ) -> int: """simple docstring""" return 13
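The feed_forward_proj parsing above can be observed directly on the released T5Config, which this obfuscated class mirrors. A small sketch, assuming the public attribute names dense_act_fn and is_gated_act (the targets this logic assigns to in the released class):

from transformers import T5Config

cfg = T5Config(feed_forward_proj="gated-gelu")
# "gated-gelu" splits into ["gated", "gelu"]; the backwards-compatibility
# branch then rewrites the dense activation to "gelu_new".
print(cfg.dense_act_fn, cfg.is_gated_act)  # -> gelu_new True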
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __a : '''simple docstring''' def __init__( self , _a , _a=99 , _a=13 , _a=7 , _a=9 , _a=True , _a=True , _a=False , _a=32 , _a=5 , _a=4 , _a=37 , _a=8 , _a=0.1 , _a=0.002 , _a=1 , _a=0 , _a=0 , _a=None , _a=None , ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE__ : Tuple = encoder_seq_length SCREAMING_SNAKE_CASE__ : str = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE__ : Optional[int] = self.decoder_seq_length SCREAMING_SNAKE_CASE__ : Tuple = is_training SCREAMING_SNAKE_CASE__ : Dict = use_attention_mask SCREAMING_SNAKE_CASE__ : List[str] = use_labels SCREAMING_SNAKE_CASE__ : str = vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Any = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = d_ff SCREAMING_SNAKE_CASE__ : Any = relative_attention_num_buckets SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout_rate SCREAMING_SNAKE_CASE__ : List[str] = initializer_factor SCREAMING_SNAKE_CASE__ : List[Any] = eos_token_id SCREAMING_SNAKE_CASE__ : List[str] = pad_token_id SCREAMING_SNAKE_CASE__ : Any = decoder_start_token_id SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : str = decoder_layers def _a ( self ) -> Tuple: """simple docstring""" return TaConfig.from_pretrained("""google/umt5-base""" ) def _a ( self , _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> Any: """simple docstring""" if attention_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE__ : int = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: SCREAMING_SNAKE_CASE__ : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=_a ) if decoder_head_mask is None: SCREAMING_SNAKE_CASE__ : List[str] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=_a ) if cross_attn_head_mask is None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=_a ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _a ( self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - 
num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input SCREAMING_SNAKE_CASE__ : Tuple = input_ids.clamp(self.pad_token_id + 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config() SCREAMING_SNAKE_CASE__ : List[str] = config.num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_inputs_dict(_a , _a , _a ) return config, input_dict def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs() return config, inputs_dict def _a ( self ) -> List[str]: """simple docstring""" return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _a ( self ) -> List[Any]: """simple docstring""" return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _a ( self , _a , _a , _a , _a , _a , _a , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = UMTaModel(config=_a ) model.to(_a ) model.eval() SCREAMING_SNAKE_CASE__ : Dict = model( input_ids=_a , decoder_input_ids=_a , attention_mask=_a , decoder_attention_mask=_a , ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(input_ids=_a , decoder_input_ids=_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = result.last_hidden_state SCREAMING_SNAKE_CASE__ : Dict = result.past_key_values SCREAMING_SNAKE_CASE__ : Any = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(_a ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def _a ( self , _a , _a , _a , _a , _a , _a , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = UMTaModel(config=_a ).get_decoder().to(_a ).eval() # first forward pass SCREAMING_SNAKE_CASE__ : str = model(_a , use_cache=_a ) SCREAMING_SNAKE_CASE__ : str = model(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , use_cache=_a ) self.parent.assertTrue(len(_a ) == len(_a ) ) self.parent.assertTrue(len(_a ) == len(_a ) + 1 
) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )["""last_hidden_state"""] SCREAMING_SNAKE_CASE__ : Tuple = model(_a , past_key_values=_a )["""last_hidden_state"""] # select random slice SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ : List[Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) ) def _a ( self , _a , _a , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = UMTaModel(config=_a ).to(_a ).half().eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_a )["""last_hidden_state"""] self.parent.assertFalse(torch.isnan(_a ).any().item() ) @require_torch class __a (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :Union[str, Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE :Optional[int] = (UMTaForConditionalGeneration,) if is_torch_available() else () _SCREAMING_SNAKE_CASE :List[str] = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE :Union[str, Any] = True _SCREAMING_SNAKE_CASE :Tuple = False _SCREAMING_SNAKE_CASE :Optional[Any] = False _SCREAMING_SNAKE_CASE :List[Any] = True _SCREAMING_SNAKE_CASE :List[str] = True # The small UMT5 model needs higher percentages for CPU/MP tests _SCREAMING_SNAKE_CASE :Union[str, Any] = [0.8, 0.9] def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = UMTaModelTester(self ) @unittest.skip("""Test has a segmentation fault on torch 1.8.0""" ) def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : Dict = UMTaModel(config_and_inputs[0] ).to(_a ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( _a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=_a , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*_a ) def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""] SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : List[Any] = config_and_inputs[0] SCREAMING_SNAKE_CASE__ : Tuple = 
UMTaForConditionalGeneration(_a ).eval() model.to(_a ) SCREAMING_SNAKE_CASE__ : List[str] = { """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_a ), """decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ), """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_a ), } for attn_name, (name, mask) in zip(_a , head_masking.items() ): SCREAMING_SNAKE_CASE__ : List[str] = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": SCREAMING_SNAKE_CASE__ : str = torch.ones( config.num_decoder_layers , config.num_heads , device=_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate( config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_a , return_dict_in_generate=_a , **_a , ) # We check the state of decoder_attentions and cross_attentions just from the last step SCREAMING_SNAKE_CASE__ : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" ) def _a ( self ) -> Dict: """simple docstring""" pass @require_torch @require_sentencepiece @require_tokenizers class __a (unittest.TestCase): '''simple docstring''' @slow @unittest.skip( """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" ) def _a ( self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_a ).to(_a ) SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_a , legacy=_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [ """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""", """No se como puedo <extra_id_0>.""", """This is the reason why we <extra_id_0> them.""", """The <extra_id_0> walks in <extra_id_1>, seats""", """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""", ] SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_a , return_tensors="""pt""" , padding=_a ).input_ids # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor( [ [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(_a , _a ) SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(input_ids.to(_a ) ) SCREAMING_SNAKE_CASE__ : int = [ """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. 
[eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""", """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", ] SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(_a ) self.assertEqual(_a , _a )
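For completeness, a minimal generation smoke test against the same checkpoint the integration test targets; UMT5ForConditionalGeneration is the released name of the class exercised above, and network access to google/umt5-small is assumed:

from transformers import AutoTokenizer, UMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")

inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
outputs = model.generate(inputs.input_ids, max_new_tokens=10)
print(tokenizer.batch_decode(outputs, skip_special_tokens=False))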
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL a :Optional[Any] = logging.get_logger(__name__) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple[int, int]: def constraint_to_multiple_of(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0 , __lowerCAmelCase=None ): SCREAMING_SNAKE_CASE__ : List[Any] = round(val / multiple ) * multiple if max_val is not None and x > max_val: SCREAMING_SNAKE_CASE__ : Optional[Any] = math.floor(val / multiple ) * multiple if x < min_val: SCREAMING_SNAKE_CASE__ : Dict = math.ceil(val / multiple ) * multiple return x SCREAMING_SNAKE_CASE__ : List[str] = (output_size, output_size) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else output_size SCREAMING_SNAKE_CASE__ : int = get_image_size(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = output_size # determine new height and width SCREAMING_SNAKE_CASE__ : Optional[Any] = output_height / input_height SCREAMING_SNAKE_CASE__ : str = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width SCREAMING_SNAKE_CASE__ : List[Any] = scale_width else: # fit height SCREAMING_SNAKE_CASE__ : List[Any] = scale_height SCREAMING_SNAKE_CASE__ : Tuple = constraint_to_multiple_of(scale_height * input_height , multiple=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__lowerCAmelCase ) return (new_height, new_width) class __a (UpperCamelCase_): '''simple docstring''' _SCREAMING_SNAKE_CASE :Union[str, Any] = ["""pixel_values"""] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = False , _a = 1 , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ) -> None: """simple docstring""" super().__init__(**_a ) SCREAMING_SNAKE_CASE__ : int = size if size is not None else {"""height""": 384, """width""": 384} SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(_a ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize SCREAMING_SNAKE_CASE__ : Optional[int] = size SCREAMING_SNAKE_CASE__ : Tuple = keep_aspect_ratio SCREAMING_SNAKE_CASE__ : int = ensure_multiple_of SCREAMING_SNAKE_CASE__ : str = resample SCREAMING_SNAKE_CASE__ : str = do_rescale SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor SCREAMING_SNAKE_CASE__ : Any = do_normalize SCREAMING_SNAKE_CASE__ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD def _a ( self , _a , _a , _a = False , _a = 1 , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) SCREAMING_SNAKE_CASE__ : List[Any] = get_resize_output_image_size( _a , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=_a , multiple=_a , ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def _a ( self , _a , _a , _a = None , **_a , ) -> int: """simple docstring""" return rescale(_a , scale=_a , data_format=_a , **_a ) def _a ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: """simple docstring""" return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def _a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : Optional[int] = size if size is not None else self.size SCREAMING_SNAKE_CASE__ : str = get_size_dict(_a ) SCREAMING_SNAKE_CASE__ : str = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio SCREAMING_SNAKE_CASE__ : Union[str, Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of SCREAMING_SNAKE_CASE__ : List[Any] = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : Any = do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE__ : int = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE__ : Tuple = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_a ) for image in images] if do_resize: SCREAMING_SNAKE_CASE__ : List[Any] = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : List[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE__ : int = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] SCREAMING_SNAKE_CASE__ : Optional[Any] = [to_channel_dimension_format(_a , _a ) for image in images] SCREAMING_SNAKE_CASE__ : Any = {"""pixel_values""": images} return BatchFeature(data=_a , tensor_type=_a ) def _a ( self , _a , _a = None ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_a ): SCREAMING_SNAKE_CASE__ : List[Any] = target_sizes.numpy() SCREAMING_SNAKE_CASE__ : Tuple = [] for idx in range(len(_a ) ): SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = logits.argmax(dim=1 ) SCREAMING_SNAKE_CASE__ : int = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
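A worked example of the resizing math above: for a 480x640 input, a 384x384 target, keep_aspect_ratio=True and ensure_multiple_of=32, the height scale (0.8) is closer to 1 than the width scale (0.6), so both sides are scaled by 0.8 and snapped to multiples of 32, giving 384x512. A standalone sketch (the max_val clamp is omitted for brevity):

import math

def constraint_to_multiple_of(val, multiple, min_val=0):
    x = round(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

scale_height, scale_width = 384 / 480, 384 / 640
# "scale as little as possible": pick the factor closer to 1, here the height scale
scale = scale_width if abs(1 - scale_width) < abs(1 - scale_height) else scale_height
print(constraint_to_multiple_of(scale * 480, 32), constraint_to_multiple_of(scale * 640, 32))  # -> 384 512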
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class __a (UpperCamelCase_): '''simple docstring''' def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> Union[str, Any]: """simple docstring""" super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) SCREAMING_SNAKE_CASE__ : List[Any] = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : Optional[int] = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits SCREAMING_SNAKE_CASE__ : str = self.builder.as_dataset( split="""train""" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class __a : '''simple docstring''' def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> Any: """simple docstring""" if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) SCREAMING_SNAKE_CASE__ : int = dataset SCREAMING_SNAKE_CASE__ : Any = name SCREAMING_SNAKE_CASE__ : Optional[Any] = con SCREAMING_SNAKE_CASE__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE SCREAMING_SNAKE_CASE__ : int = num_proc SCREAMING_SNAKE_CASE__ : int = to_sql_kwargs def _a ( self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.to_sql_kwargs.pop("""sql""" , _a ) SCREAMING_SNAKE_CASE__ : Tuple = self.to_sql_kwargs.pop("""con""" , _a ) SCREAMING_SNAKE_CASE__ : Tuple = self.to_sql_kwargs.pop("""index""" , _a ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(index=_a , **self.to_sql_kwargs ) return written def _a ( self , _a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = args SCREAMING_SNAKE_CASE__ : List[str] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs SCREAMING_SNAKE_CASE__ : Any = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) SCREAMING_SNAKE_CASE__ : Optional[int] = batch.to_pandas() SCREAMING_SNAKE_CASE__ : List[Any] = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def _a ( self , _a , **_a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , 
desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch a :List[str] = True except ImportError: a :Tuple = False try: from torch.hub import _get_torch_home a :Optional[int] = _get_torch_home() except ImportError: a :List[Any] = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) a :Optional[Any] = os.path.join(torch_cache_home, "transformers") a :Union[str, Any] = "https://cdn.huggingface.co" a :str = "https://s3.amazonaws.com/models.huggingface.co/bert" a :str = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) a :Optional[Any] = os.path.join(PATH, "config.yaml") a :Any = os.path.join(PATH, "attributes.txt") a :Optional[Any] = os.path.join(PATH, "objects.txt") a :Tuple = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) a :Optional[Any] = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) a :Any = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) a :Optional[int] = "pytorch_model.bin" a :Dict = "config.yaml" def _lowercase ( __lowerCAmelCase=OBJECTS , __lowerCAmelCase=ATTRIBUTES ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[int] = [] with open(__lowerCAmelCase ) as f: for object in f.readlines(): vg_classes.append(object.split(""",""" )[0].lower().strip() ) SCREAMING_SNAKE_CASE__ : Optional[int] = [] with open(__lowerCAmelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split(""",""" )[0].lower().strip() ) return vg_classes, vg_attrs def _lowercase ( __lowerCAmelCase ) -> Dict: SCREAMING_SNAKE_CASE__ : str = OrderedDict() with open(__lowerCAmelCase , """rb""" ) as f: SCREAMING_SNAKE_CASE__ : int = pkl.load(__lowerCAmelCase )["""model"""] for k in copy.deepcopy(list(ckp.keys() ) ): SCREAMING_SNAKE_CASE__ : List[Any] = ckp.pop(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , np.ndarray ): SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(__lowerCAmelCase ) else: assert isinstance(__lowerCAmelCase , torch.tensor ), type(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = v return r class __a : '''simple docstring''' _SCREAMING_SNAKE_CASE :str = {} def __init__( self , _a , _a = "root" , _a=0 ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = name SCREAMING_SNAKE_CASE__ : Optional[Any] = level SCREAMING_SNAKE_CASE__ : Tuple = {} for k, v in dictionary.items(): if v is None: raise ValueError() SCREAMING_SNAKE_CASE__ : Optional[Any] = copy.deepcopy(_a ) SCREAMING_SNAKE_CASE__ : List[str] = copy.deepcopy(_a ) if isinstance(_a , _a ): SCREAMING_SNAKE_CASE__ : Tuple = Config(_a , name=_a , level=level + 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = v setattr(self , _a , _a ) SCREAMING_SNAKE_CASE__ : Tuple = d def __repr__( self ) -> Optional[int]: """simple docstring""" return str(list((self._pointer.keys()) ) ) def __setattr__( self , _a , _a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = val SCREAMING_SNAKE_CASE__ : str = val SCREAMING_SNAKE_CASE__ : str = key.split(""".""" ) 
SCREAMING_SNAKE_CASE__ : str = len(_a ) - 1 SCREAMING_SNAKE_CASE__ : List[str] = self._pointer if len(_a ) > 1: for i, l in enumerate(_a ): if hasattr(self , _a ) and isinstance(getattr(self , _a ) , _a ): setattr(getattr(self , _a ) , """.""".join(levels[i:] ) , _a ) if l == last_level: SCREAMING_SNAKE_CASE__ : Any = val else: SCREAMING_SNAKE_CASE__ : str = pointer[l] def _a ( self ) -> List[str]: """simple docstring""" return self._pointer def _a ( self , _a , _a ) -> Dict: """simple docstring""" with open(f'''{file_name}''' , """w""" ) as stream: dump(_a , _a ) def _a ( self , _a , _a ) -> int: """simple docstring""" with open(f'''{file_name}''' , """w""" ) as stream: json.dump(_a , _a ) @staticmethod def _a ( _a ) -> Optional[Any]: """simple docstring""" with open(_a ) as stream: SCREAMING_SNAKE_CASE__ : int = load(_a , Loader=_a ) return data def __str__( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = """ """ if self._name != "root": SCREAMING_SNAKE_CASE__ : List[Any] = f'''{t * (self._level-1)}{self._name}:\n''' else: SCREAMING_SNAKE_CASE__ : Tuple = """""" SCREAMING_SNAKE_CASE__ : Optional[int] = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(_a , _a ): r += f'''{t * (self._level)}{v}\n''' self._level += 1 else: r += f'''{t * (self._level)}{k}: {v} ({type(_a ).__name__})\n''' SCREAMING_SNAKE_CASE__ : str = level return r[:-1] @classmethod def _a ( cls , _a , **_a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = cls.get_config_dict(_a , **_a ) return cls(_a ) @classmethod def _a ( cls , _a , **_a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop("""cache_dir""" , _a ) SCREAMING_SNAKE_CASE__ : Tuple = kwargs.pop("""force_download""" , _a ) SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("""resume_download""" , _a ) SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop("""proxies""" , _a ) SCREAMING_SNAKE_CASE__ : str = kwargs.pop("""local_files_only""" , _a ) if os.path.isdir(_a ): SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(_a , _a ) elif os.path.isfile(_a ) or is_remote_url(_a ): SCREAMING_SNAKE_CASE__ : List[str] = pretrained_model_name_or_path else: SCREAMING_SNAKE_CASE__ : str = hf_bucket_url(_a , filename=_a , use_cdn=_a ) try: # Load from URL or cache if already cached SCREAMING_SNAKE_CASE__ : Any = cached_path( _a , cache_dir=_a , force_download=_a , proxies=_a , resume_download=_a , local_files_only=_a , ) # Load config dict if resolved_config_file is None: raise EnvironmentError SCREAMING_SNAKE_CASE__ : List[str] = Config.load_yaml(_a ) except EnvironmentError: SCREAMING_SNAKE_CASE__ : Tuple = """Can't load config for""" raise EnvironmentError(_a ) if resolved_config_file == config_file: print("""loading configuration file from path""" ) else: print("""loading configuration file cache""" ) return Config.load_yaml(_a ), kwargs def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : int = torch.load("""dump.pt""" , map_location=in_tensor.device ) SCREAMING_SNAKE_CASE__ : Dict = in_tensor.numpy() SCREAMING_SNAKE_CASE__ : Any = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(__lowerCAmelCase , __lowerCAmelCase , rtol=0.01 , atol=0.1 ), ( F'''{sum([1 for x in np.isclose(__lowerCAmelCase , __lowerCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception("""tensors are all good""" ) # Hugging 
face functions below def _lowercase ( __lowerCAmelCase ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[Any] = urlparse(__lowerCAmelCase ) return parsed.scheme in ("http", "https") def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ) -> str: SCREAMING_SNAKE_CASE__ : List[str] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX SCREAMING_SNAKE_CASE__ : Tuple = """/""" not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=0 , __lowerCAmelCase=None , ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] = """python/{}""".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): ua += "; " + "; ".join("""{}/{}""".format(__lowerCAmelCase , __lowerCAmelCase ) for k, v in user_agent.items() ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): ua += "; " + user_agent SCREAMING_SNAKE_CASE__ : List[str] = {"""user-agent""": ua} if resume_size > 0: SCREAMING_SNAKE_CASE__ : Dict = """bytes=%d-""" % (resume_size,) SCREAMING_SNAKE_CASE__ : Optional[int] = requests.get(__lowerCAmelCase , stream=__lowerCAmelCase , proxies=__lowerCAmelCase , headers=__lowerCAmelCase ) if response.status_code == 416: # Range not satisfiable return SCREAMING_SNAKE_CASE__ : Optional[int] = response.headers.get("""Content-Length""" ) SCREAMING_SNAKE_CASE__ : Any = resume_size + int(__lowerCAmelCase ) if content_length is not None else None SCREAMING_SNAKE_CASE__ : int = tqdm( unit="""B""" , unit_scale=__lowerCAmelCase , total=__lowerCAmelCase , initial=__lowerCAmelCase , desc="""Downloading""" , ) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(__lowerCAmelCase ) ) temp_file.write(__lowerCAmelCase ) progress.close() def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=10 , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , ) -> Union[str, Any]: if cache_dir is None: SCREAMING_SNAKE_CASE__ : Optional[Any] = TRANSFORMERS_CACHE if isinstance(__lowerCAmelCase , __lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : List[Any] = str(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = None if not local_files_only: try: SCREAMING_SNAKE_CASE__ : Dict = requests.head(__lowerCAmelCase , allow_redirects=__lowerCAmelCase , proxies=__lowerCAmelCase , timeout=__lowerCAmelCase ) if response.status_code == 200: SCREAMING_SNAKE_CASE__ : List[str] = response.headers.get("""ETag""" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass SCREAMING_SNAKE_CASE__ : Tuple = url_to_filename(__lowerCAmelCase , __lowerCAmelCase ) # get cache path to put the file SCREAMING_SNAKE_CASE__ : Any = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. 
# try to get the last downloaded one if etag is None: if os.path.exists(__lowerCAmelCase ): return cache_path else: SCREAMING_SNAKE_CASE__ : List[Any] = [ file for file in fnmatch.filter(os.listdir(__lowerCAmelCase ) , filename + """.*""" ) if not file.endswith(""".json""" ) and not file.endswith(""".lock""" ) ] if len(__lowerCAmelCase ) > 0: return os.path.join(__lowerCAmelCase , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( """Cannot find the requested files in the cached path and outgoing traffic has been""" """ disabled. To enable model look-ups and downloads online, set 'local_files_only'""" """ to False.""" ) return None # From now on, etag is not None. if os.path.exists(__lowerCAmelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. SCREAMING_SNAKE_CASE__ : List[Any] = cache_path + """.lock""" with FileLock(__lowerCAmelCase ): # If the download just completed while the lock was activated. if os.path.exists(__lowerCAmelCase ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: SCREAMING_SNAKE_CASE__ : Dict = cache_path + """.incomplete""" @contextmanager def _resumable_file_manager(): with open(__lowerCAmelCase , """a+b""" ) as f: yield f SCREAMING_SNAKE_CASE__ : Tuple = _resumable_file_manager if os.path.exists(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Dict = os.stat(__lowerCAmelCase ).st_size else: SCREAMING_SNAKE_CASE__ : str = 0 else: SCREAMING_SNAKE_CASE__ : Optional[Any] = partial(tempfile.NamedTemporaryFile , dir=__lowerCAmelCase , delete=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( """%s not found in cache or force_download set to True, downloading to %s""" , __lowerCAmelCase , temp_file.name , ) http_get( __lowerCAmelCase , __lowerCAmelCase , proxies=__lowerCAmelCase , resume_size=__lowerCAmelCase , user_agent=__lowerCAmelCase , ) os.replace(temp_file.name , __lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = {"""url""": url, """etag""": etag} SCREAMING_SNAKE_CASE__ : Optional[Any] = cache_path + """.json""" with open(__lowerCAmelCase , """w""" ) as meta_file: json.dump(__lowerCAmelCase , __lowerCAmelCase ) return cache_path def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] = url.encode("""utf-8""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = shaaaa(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = url_hash.hexdigest() if etag: SCREAMING_SNAKE_CASE__ : Union[str, Any] = etag.encode("""utf-8""" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = shaaaa(__lowerCAmelCase ) filename += "." 
+ etag_hash.hexdigest() if url.endswith(""".h5""" ): filename += ".h5" return filename def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ) -> List[str]: if cache_dir is None: SCREAMING_SNAKE_CASE__ : str = TRANSFORMERS_CACHE if isinstance(__lowerCAmelCase , __lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Tuple = str(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : int = str(__lowerCAmelCase ) if is_remote_url(__lowerCAmelCase ): # URL, so get it from the cache (downloading if necessary) SCREAMING_SNAKE_CASE__ : str = get_from_cache( __lowerCAmelCase , cache_dir=__lowerCAmelCase , force_download=__lowerCAmelCase , proxies=__lowerCAmelCase , resume_download=__lowerCAmelCase , user_agent=__lowerCAmelCase , local_files_only=__lowerCAmelCase , ) elif os.path.exists(__lowerCAmelCase ): # File, and it exists. SCREAMING_SNAKE_CASE__ : Optional[Any] = url_or_filename elif urlparse(__lowerCAmelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("""file {} not found""".format(__lowerCAmelCase ) ) else: # Something unknown raise ValueError("""unable to parse {} as a URL or as a local path""".format(__lowerCAmelCase ) ) if extract_compressed_file: if not is_zipfile(__lowerCAmelCase ) and not tarfile.is_tarfile(__lowerCAmelCase ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" SCREAMING_SNAKE_CASE__ : Any = os.path.split(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = output_file.replace(""".""" , """-""" ) + """-extracted""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if os.path.isdir(__lowerCAmelCase ) and os.listdir(__lowerCAmelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions SCREAMING_SNAKE_CASE__ : Tuple = output_path + """.lock""" with FileLock(__lowerCAmelCase ): shutil.rmtree(__lowerCAmelCase , ignore_errors=__lowerCAmelCase ) os.makedirs(__lowerCAmelCase ) if is_zipfile(__lowerCAmelCase ): with ZipFile(__lowerCAmelCase , """r""" ) as zip_file: zip_file.extractall(__lowerCAmelCase ) zip_file.close() elif tarfile.is_tarfile(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Tuple = tarfile.open(__lowerCAmelCase ) tar_file.extractall(__lowerCAmelCase ) tar_file.close() else: raise EnvironmentError("""Archive format of {} could not be identified""".format(__lowerCAmelCase ) ) return output_path_extracted return output_path def _lowercase ( __lowerCAmelCase , __lowerCAmelCase="," ) -> Optional[Any]: assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) if os.path.isfile(__lowerCAmelCase ): with open(__lowerCAmelCase ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] = eval(f.read() ) else: SCREAMING_SNAKE_CASE__ : str = requests.get(__lowerCAmelCase ) try: SCREAMING_SNAKE_CASE__ : Dict = requests.json() except Exception: SCREAMING_SNAKE_CASE__ : Union[str, Any] = req.content.decode() assert data is not None, "could not connect" try: SCREAMING_SNAKE_CASE__ : Dict = eval(__lowerCAmelCase ) except Exception: SCREAMING_SNAKE_CASE__ : Dict = data.split("""\n""" ) req.close() return data def _lowercase ( __lowerCAmelCase ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[Any] = requests.get(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : str = 
np.array(Image.open(BytesIO(response.content ) ) ) return img def _lowercase ( __lowerCAmelCase ) -> int: SCREAMING_SNAKE_CASE__ : str = url.split("""/""" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(__lowerCAmelCase ) with open(__lowerCAmelCase , """rb""" ) as stream: SCREAMING_SNAKE_CASE__ : List[str] = pkl.load(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Dict = weights.pop("""model""" ) SCREAMING_SNAKE_CASE__ : Any = {} for k, v in model.items(): SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.from_numpy(__lowerCAmelCase ) if "running_var" in k: SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([0] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = k.replace("""running_var""" , """num_batches_tracked""" ) SCREAMING_SNAKE_CASE__ : int = zero return new def _lowercase ( ) -> Optional[Any]: print(F'''{os.path.abspath(os.path.join(__lowerCAmelCase , os.pardir ) )}/demo.ipynb''' ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase="RGB" ) -> Union[str, Any]: assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) if os.path.isfile(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = cva.imread(__lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__ : List[Any] = get_image_from_url(__lowerCAmelCase ) assert img is not None, F'''could not connect to: {im}''' SCREAMING_SNAKE_CASE__ : Dict = cva.cvtColor(__lowerCAmelCase , cva.COLOR_BGR2RGB ) if input_format == "RGB": SCREAMING_SNAKE_CASE__ : Optional[Any] = img[:, :, ::-1] return img def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=1 ) -> str: return (images[i : i + batch] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ))
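The cache-file naming scheme used by url_to_filename above (its shaaaa is sha256 with the digits mangled) can be reproduced standalone:

from hashlib import sha256
from typing import Optional

def url_to_filename(url: str, etag: Optional[str] = None) -> str:
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename

print(url_to_filename("https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"))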
"""simple docstring""" def _lowercase ( __lowerCAmelCase ) -> int: if divisor % 5 == 0 or divisor % 2 == 0: return 0 SCREAMING_SNAKE_CASE__ : List[Any] = 1 SCREAMING_SNAKE_CASE__ : int = 1 while repunit: SCREAMING_SNAKE_CASE__ : str = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def _lowercase ( __lowerCAmelCase = 100_0000 ) -> int: SCREAMING_SNAKE_CASE__ : Dict = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(__lowerCAmelCase ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" import os import unittest from transformers import LxmertTokenizer, LxmertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __a (UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :List[Any] = LxmertTokenizer _SCREAMING_SNAKE_CASE :List[Any] = LxmertTokenizerFast _SCREAMING_SNAKE_CASE :Optional[Any] = True _SCREAMING_SNAKE_CASE :List[Any] = True def _a ( self ) -> Union[str, Any]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : str = [ """[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _a ( self , _a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = """UNwant\u00E9d,running""" SCREAMING_SNAKE_CASE__ : str = """unwanted, running""" return input_text, output_text def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer_class(self.vocab_file ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(_a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] ) def _a ( self ) -> str: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : List[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Any = """I was born in 92000, and this is falsé.""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.tokenize(_a ) SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(_a , add_special_tokens=_a ) SCREAMING_SNAKE_CASE__ : int = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) SCREAMING_SNAKE_CASE__ : List[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(_a ) SCREAMING_SNAKE_CASE__ : int = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a )
"""simple docstring""" import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") a :Union[str, Any] = logging.getLogger(__name__) @dataclass class __a : '''simple docstring''' _SCREAMING_SNAKE_CASE :Optional[int] = field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""}) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) _SCREAMING_SNAKE_CASE :Optional[int] = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) _SCREAMING_SNAKE_CASE :Optional[int] = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) _SCREAMING_SNAKE_CASE :Optional[int] = field( default=UpperCamelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) @dataclass class __a : '''simple docstring''' _SCREAMING_SNAKE_CASE :str = field( default=UpperCamelCase_ , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}) _SCREAMING_SNAKE_CASE :str = field( default=UpperCamelCase_ , metadata={"""help""": """Evaluation language. 
Also train language if `train_language` is set to None."""}) _SCREAMING_SNAKE_CASE :Optional[str] = field( default=UpperCamelCase_ , metadata={"""help""": """Train language if it is different from the evaluation language."""}) _SCREAMING_SNAKE_CASE :Optional[str] = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) _SCREAMING_SNAKE_CASE :Optional[str] = field( default=UpperCamelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}) _SCREAMING_SNAKE_CASE :Optional[str] = field( default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) _SCREAMING_SNAKE_CASE :Optional[bool] = field( default=UpperCamelCase_ , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , ) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) _SCREAMING_SNAKE_CASE :str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) _SCREAMING_SNAKE_CASE :bool = field( default=UpperCamelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def _lowercase ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_xnli""" , __lowerCAmelCase ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : List[Any] = training_args.get_process_log_level() logger.setLevel(__lowerCAmelCase ) datasets.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
SCREAMING_SNAKE_CASE__ : Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: SCREAMING_SNAKE_CASE__ : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset( """xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: SCREAMING_SNAKE_CASE__ : str = load_dataset( """xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = train_dataset.features["""label"""].names if training_args.do_eval: SCREAMING_SNAKE_CASE__ : int = load_dataset( """xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : List[Any] = eval_dataset.features["""label"""].names if training_args.do_predict: SCREAMING_SNAKE_CASE__ : int = load_dataset( """xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : Tuple = predict_dataset.features["""label"""].names # Labels SCREAMING_SNAKE_CASE__ : Any = len(__lowerCAmelCase ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
SCREAMING_SNAKE_CASE__ : Any = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCAmelCase , idalabel={str(__lowerCAmelCase ): label for i, label in enumerate(__lowerCAmelCase )} , labelaid={label: i for i, label in enumerate(__lowerCAmelCase )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) SCREAMING_SNAKE_CASE__ : str = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: SCREAMING_SNAKE_CASE__ : str = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch SCREAMING_SNAKE_CASE__ : Optional[Any] = False def preprocess_function(__lowerCAmelCase ): # Tokenize the texts return tokenizer( examples["""premise"""] , examples["""hypothesis"""] , padding=__lowerCAmelCase , max_length=data_args.max_seq_length , truncation=__lowerCAmelCase , ) if training_args.do_train: if data_args.max_train_samples is not None: SCREAMING_SNAKE_CASE__ : Optional[Any] = min(len(__lowerCAmelCase ) , data_args.max_train_samples ) SCREAMING_SNAKE_CASE__ : str = train_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): SCREAMING_SNAKE_CASE__ : List[str] = train_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , ) # Log a few random samples from the training set: for index in random.sample(range(len(__lowerCAmelCase ) ) , 3 ): logger.info(F'''Sample {index} of the training set: {train_dataset[index]}.''' ) if training_args.do_eval: if data_args.max_eval_samples is not None: SCREAMING_SNAKE_CASE__ : Any = min(len(__lowerCAmelCase ) , data_args.max_eval_samples ) SCREAMING_SNAKE_CASE__ : List[Any] = eval_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): SCREAMING_SNAKE_CASE__ : List[str] = eval_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: SCREAMING_SNAKE_CASE__ : int = min(len(__lowerCAmelCase ) , data_args.max_predict_samples ) SCREAMING_SNAKE_CASE__ : List[Any] = predict_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ): SCREAMING_SNAKE_CASE__ : Tuple = predict_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , 
desc="""Running tokenizer on prediction dataset""" , ) # Get the metric function SCREAMING_SNAKE_CASE__ : Optional[Any] = evaluate.load("""xnli""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Dict = p.predictions[0] if isinstance(p.predictions , __lowerCAmelCase ) else p.predictions SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.argmax(__lowerCAmelCase , axis=1 ) return metric.compute(predictions=__lowerCAmelCase , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: SCREAMING_SNAKE_CASE__ : List[Any] = default_data_collator elif training_args.fpaa: SCREAMING_SNAKE_CASE__ : int = DataCollatorWithPadding(__lowerCAmelCase , pad_to_multiple_of=8 ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # Initialize our Trainer SCREAMING_SNAKE_CASE__ : Union[str, Any] = Trainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , ) # Training if training_args.do_train: SCREAMING_SNAKE_CASE__ : Dict = None if training_args.resume_from_checkpoint is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] = last_checkpoint SCREAMING_SNAKE_CASE__ : str = trainer.train(resume_from_checkpoint=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[Any] = train_result.metrics SCREAMING_SNAKE_CASE__ : Optional[int] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : Dict = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __lowerCAmelCase ) trainer.save_metrics("""train""" , __lowerCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) SCREAMING_SNAKE_CASE__ : Any = trainer.evaluate(eval_dataset=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.log_metrics("""eval""" , __lowerCAmelCase ) trainer.save_metrics("""eval""" , __lowerCAmelCase ) # Prediction if training_args.do_predict: logger.info("""*** Predict ***""" ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = trainer.predict(__lowerCAmelCase , metric_key_prefix="""predict""" ) SCREAMING_SNAKE_CASE__ : List[str] = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : int = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.log_metrics("""predict""" , __lowerCAmelCase ) trainer.save_metrics("""predict""" , __lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = np.argmax(__lowerCAmelCase , axis=1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , """predictions.txt""" ) if trainer.is_world_process_zero(): with open(__lowerCAmelCase , 
"""w""" ) as writer: writer.write("""index\tprediction\n""" ) for index, item in enumerate(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Optional[int] = label_list[item] writer.write(F'''{index}\t{item}\n''' ) if __name__ == "__main__": main()
12
0
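The XNLI script above reduces to two small moves: encoding (premise, hypothesis) pairs together, and turning class logits into a metric with an argmax. A minimal sketch of both, assuming the transformers library is installed and using bert-base-multilingual-cased purely as a stand-in checkpoint:

import numpy as np
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")

# Premise/hypothesis pairs are encoded as one sequence, as in preprocess_function above.
batch = tokenizer(
    ["The cat sat on the mat."],        # premise
    ["There is a cat on the mat."],     # hypothesis
    padding="max_length", max_length=128, truncation=True,
)
print(len(batch["input_ids"][0]))       # 128

# compute_metrics boils down to argmax over (batch, num_labels) logits plus accuracy.
logits = np.array([[0.1, 2.3, -1.0], [1.5, 0.2, 0.0]])   # made-up predictions
labels = np.array([1, 0])
print((np.argmax(logits, axis=1) == labels).mean())      # 1.0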
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class __a ( _snake_case ): __UpperCamelCase : Tuple = 'sew' def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,): '''simple docstring''' super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ) __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = feat_extract_norm __SCREAMING_SNAKE_CASE = feat_extract_activation __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = conv_bias __SCREAMING_SNAKE_CASE = num_conv_pos_embeddings __SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups __SCREAMING_SNAKE_CASE = len(self.conv_dim ) __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = squeeze_factor __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = feat_proj_dropout __SCREAMING_SNAKE_CASE = final_dropout __SCREAMING_SNAKE_CASE = layerdrop __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 
__SCREAMING_SNAKE_CASE = apply_spec_augment __SCREAMING_SNAKE_CASE = mask_time_prob __SCREAMING_SNAKE_CASE = mask_time_length __SCREAMING_SNAKE_CASE = mask_time_min_masks __SCREAMING_SNAKE_CASE = mask_feature_prob __SCREAMING_SNAKE_CASE = mask_feature_length __SCREAMING_SNAKE_CASE = mask_feature_min_masks # ctc loss __SCREAMING_SNAKE_CASE = ctc_loss_reduction __SCREAMING_SNAKE_CASE = ctc_zero_infinity # sequence classification __SCREAMING_SNAKE_CASE = use_weighted_layer_sum __SCREAMING_SNAKE_CASE = classifier_proj_size @property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return functools.reduce(operator.mul ,self.conv_stride ,1 )
13
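The property at the end of the SEW config multiplies the convolutional strides together, giving the overall waveform-to-frame downsampling factor of the feature extractor. Evaluated standalone, with the default strides copied from the signature above:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)   # defaults from the config
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)   # 320 -> one output frame per 320 input samples (20 ms at 16 kHz)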
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : List[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape __SCREAMING_SNAKE_CASE = jax.image.resize( lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,) __SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : List[str] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : int = None __UpperCamelCase : float = 0.0 __UpperCamelCase : bool = None __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels __SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype ) __SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob ) __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __SCREAMING_SNAKE_CASE = None if use_nin_shortcut: __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,) def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ): '''simple docstring''' __SCREAMING_SNAKE_CASE = hidden_states __SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) ) __SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 ) __SCREAMING_SNAKE_CASE = hidden_states + temb __SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase ) if self.conv_shortcut is not None: __SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase ) return hidden_states + residual
13
1
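The Flax upsampling block above doubles spatial resolution with a nearest-neighbour resize before its 3x3 convolution. For an integer factor, that resize is just a repeat along the height and width axes; a NumPy sketch of the equivalence (NumPy standing in for jax.image.resize):

import numpy as np

x = np.arange(4).reshape(1, 2, 2, 1)                 # NHWC layout, as in the Flax module
up = np.repeat(np.repeat(x, 2, axis=1), 2, axis=2)   # nearest-neighbour 2x upsample
print(up.shape)                                      # (1, 4, 4, 1)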
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
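The _LazyModule indirection above exists so that importing the package does not pull in torch until a model class is actually touched. A toy version of the same idea, using module-level __getattr__ (PEP 562) rather than transformers' internal helper; the module name and mapping here are hypothetical:

# lazy_pkg.py (hypothetical module)
import importlib

_import_structure = {"math": ["sqrt"]}   # providing module -> exported attributes

def __getattr__(name):
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(module_name)   # imported only on first access
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")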
'''simple docstring''' import sys from collections import defaultdict class __a : def __init__( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ): '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pos def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: __SCREAMING_SNAKE_CASE = 2 * start + 2 if heap[smallest_child] < heap[start]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( heap[start], positions[start], ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa __SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,lowerCamelCase ) self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = position[index] while index != 0: __SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __SCREAMING_SNAKE_CASE = heap[parent] __SCREAMING_SNAKE_CASE = position[parent] self.set_position(position[parent] ,lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,lowerCamelCase ) break __SCREAMING_SNAKE_CASE = parent else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,0 ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1 for i in range(lowerCamelCase ,-1 ,-1 ): self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = positions[0] __SCREAMING_SNAKE_CASE = sys.maxsize self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase ) return temp def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = Heap() __SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex __SCREAMING_SNAKE_CASE = [] for vertex in range(len(__UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__UpperCAmelCase ) heap.node_position.append(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = sys.maxsize for neighbor, distance in adjacency_list[0]: __SCREAMING_SNAKE_CASE = 0 
__SCREAMING_SNAKE_CASE = distance heap.heapify(__UpperCAmelCase , __UpperCAmelCase ) for _ in range(1 , len(__UpperCAmelCase ) ): __SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __SCREAMING_SNAKE_CASE = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__UpperCAmelCase )] ): __SCREAMING_SNAKE_CASE = distance heap.bottom_to_top( __UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > a = int(input("Enter number of edges: ").strip()) a = defaultdict(list) for _ in range(edges_number): a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
13
1
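The hand-rolled heap above drives Prim's algorithm; the same minimum spanning tree falls out far more compactly with the standard-library heapq. A sketch over the same adjacency-list shape, a list of (neighbour, weight) pairs per vertex:

import heapq

def prim_mst(adjacency_list):
    visited = [False] * len(adjacency_list)
    tree_edges = []
    heap = [(0, 0, -1)]   # (weight, vertex, parent); start from vertex 0
    while heap:
        weight, vertex, parent = heapq.heappop(heap)
        if visited[vertex]:
            continue
        visited[vertex] = True
        if parent != -1:
            tree_edges.append((parent, vertex))
        for neighbour, distance in adjacency_list[vertex]:
            if not visited[neighbour]:
                heapq.heappush(heap, (distance, neighbour, vertex))
    return tree_edges

print(prim_mst([[(1, 1), (2, 3)], [(0, 1), (2, 1)], [(0, 3), (1, 1)]]))
# [(0, 1), (1, 2)]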
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __a ( _snake_case ): __UpperCamelCase : Tuple = 'char' __UpperCamelCase : Optional[Any] = 'bpe' __UpperCamelCase : Tuple = 'wp' a = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __a ( _snake_case ): __UpperCamelCase : int = ['image_processor', 'char_tokenizer'] __UpperCamelCase : int = 'ViTImageProcessor' __UpperCamelCase : Optional[Any] = 'MgpstrTokenizer' def __init__( self : Union[str, Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : int=None ,**lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,lowerCamelCase ,) __SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" ) __SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) __SCREAMING_SNAKE_CASE = tokenizer __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""gpt2""" ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(lowerCamelCase ,lowerCamelCase ) def __call__( self : int ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : int=None ,lowerCamelCase : Any=None ,**lowerCamelCase : Any ): '''simple docstring''' if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: __SCREAMING_SNAKE_CASE = self.image_processor(lowerCamelCase ,return_tensors=lowerCamelCase ,**lowerCamelCase ) if text is not None: __SCREAMING_SNAKE_CASE = self.char_tokenizer(lowerCamelCase ,return_tensors=lowerCamelCase ,**lowerCamelCase ) if text is None: return inputs elif images is None: return encodings else: __SCREAMING_SNAKE_CASE = encodings["""input_ids"""] return inputs def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sequences __SCREAMING_SNAKE_CASE = char_preds.size(0 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._decode_helper(lowerCamelCase ,"""char""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._decode_helper(lowerCamelCase ,"""bpe""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._decode_helper(lowerCamelCase ,"""wp""" ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for i in range(lowerCamelCase ): __SCREAMING_SNAKE_CASE = [char_scores[i], bpe_scores[i], wp_scores[i]] __SCREAMING_SNAKE_CASE = [char_strs[i], bpe_strs[i], wp_strs[i]] __SCREAMING_SNAKE_CASE = scores.index(max(lowerCamelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = final_strs __SCREAMING_SNAKE_CASE = final_scores __SCREAMING_SNAKE_CASE = char_strs __SCREAMING_SNAKE_CASE = bpe_strs __SCREAMING_SNAKE_CASE = wp_strs return out def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : str ): '''simple docstring''' if format == 
DecodeType.CHARACTER: __SCREAMING_SNAKE_CASE = self.char_decode __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = """[s]""" elif format == DecodeType.BPE: __SCREAMING_SNAKE_CASE = self.bpe_decode __SCREAMING_SNAKE_CASE = 2 __SCREAMING_SNAKE_CASE = """#""" elif format == DecodeType.WORDPIECE: __SCREAMING_SNAKE_CASE = self.wp_decode __SCREAMING_SNAKE_CASE = 102 __SCREAMING_SNAKE_CASE = """[SEP]""" else: raise ValueError(f"""Format {format} is not supported.""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], [] __SCREAMING_SNAKE_CASE = pred_logits.size(0 ) __SCREAMING_SNAKE_CASE = pred_logits.size(1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pred_logits.topk(1 ,dim=-1 ,largest=lowerCamelCase ,sorted=lowerCamelCase ) __SCREAMING_SNAKE_CASE = preds_index.view(-1 ,lowerCamelCase )[:, 1:] __SCREAMING_SNAKE_CASE = decoder(lowerCamelCase ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.nn.functional.softmax(lowerCamelCase ,dim=2 ).max(dim=2 ) __SCREAMING_SNAKE_CASE = preds_max_prob[:, 1:] for index in range(lowerCamelCase ): __SCREAMING_SNAKE_CASE = preds_str[index].find(lowerCamelCase ) __SCREAMING_SNAKE_CASE = preds_str[index][:pred_eos] __SCREAMING_SNAKE_CASE = preds_index[index].cpu().tolist() __SCREAMING_SNAKE_CASE = pred_index.index(lowerCamelCase ) if eos_token in pred_index else -1 __SCREAMING_SNAKE_CASE = preds_max_prob[index][: pred_eos_index + 1] __SCREAMING_SNAKE_CASE = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCamelCase ) conf_scores.append(lowerCamelCase ) return dec_strs, conf_scores def UpperCAmelCase__ ( self : str ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [seq.replace(""" """ ,"""""" ) for seq in self.char_tokenizer.batch_decode(lowerCamelCase )] return decode_strs def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[str] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(lowerCamelCase ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [seq.replace(""" """ ,"""""" ) for seq in self.wp_tokenizer.batch_decode(lowerCamelCase )] return decode_strs
13
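batch_decode above runs three decoding heads (character, BPE, wordpiece) and keeps, per sample, the string whose head reports the highest cumulative-softmax confidence. The selection step itself is plain Python; a sketch with made-up head outputs:

# Hypothetical (confidence, string) pairs from the char, bpe and wp heads for one image.
candidates = [(0.91, "coffee"), (0.84, "c0ffee"), (0.88, "coffe")]
scores = [score for score, _ in candidates]
print(candidates[scores.index(max(scores))])   # (0.91, 'coffee')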
'''simple docstring''' import os import string import sys a = 1 << 8 a = { "tab": ord("\t"), "newline": ord("\r"), "esc": 27, "up": 65 + ARROW_KEY_FLAG, "down": 66 + ARROW_KEY_FLAG, "right": 67 + ARROW_KEY_FLAG, "left": 68 + ARROW_KEY_FLAG, "mod_int": 91, "undefined": sys.maxsize, "interrupt": 3, "insert": 50, "delete": 51, "pg_up": 53, "pg_down": 54, } a = KEYMAP["up"] a = KEYMAP["left"] if sys.platform == "win32": a = [] a = { b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG, b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG, b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG, b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG, b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG, b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG, b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG, b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG, } for i in range(10): a = ord(str(i)) def __magic_name__ ( ) -> Union[str, Any]: '''simple docstring''' if os.name == "nt": import msvcrt __SCREAMING_SNAKE_CASE = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(__UpperCAmelCase ) == 0: # Read the keystroke __SCREAMING_SNAKE_CASE = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): __SCREAMING_SNAKE_CASE = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: __SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(__UpperCAmelCase ) if ord(__UpperCAmelCase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) __SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] ) except KeyError: __SCREAMING_SNAKE_CASE = cha[1] else: __SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty __SCREAMING_SNAKE_CASE = sys.stdin.fileno() __SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase ) try: tty.setraw(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = sys.stdin.read(1 ) finally: termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase ) return ch def __magic_name__ ( ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(__UpperCAmelCase ) == KEYMAP["esc"]: __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]: __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
13
1
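Arrow keys arrive as escape sequences whose final byte collides with ordinary letters (the up arrow ends in 65, which is also ord("A")), so the helper above tags them with a flag bit in the ninth position. The round trip, in isolation:

ARROW_KEY_FLAG = 1 << 8            # 256

up = 65 + ARROW_KEY_FLAG           # tagged "up" key, distinct from the letter A
print(up & 0xFF)                   # 65 -> recover the raw byte
print(bool(up & ARROW_KEY_FLAG))   # True -> it was an arrow key, not "A"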
'''simple docstring''' from __future__ import annotations a = 8.9_8_8E9 # units = N * m^s * C^-2 def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict[str, float]: '''simple docstring''' __SCREAMING_SNAKE_CASE = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if distance < 0: raise ValueError("""Distance cannot be negative""" ) if force == 0: __SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(__UpperCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: __SCREAMING_SNAKE_CASE = abs(__UpperCAmelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: __SCREAMING_SNAKE_CASE = (COULOMBS_CONSTANT * charge_product / abs(__UpperCAmelCase )) ** 0.5 return {"distance": distance} raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
13
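As a quick numeric check of the Coulomb's-law function above: two 1 µC charges one metre apart should feel F = k * q1 * q2 / d**2, i.e. roughly 8.988e-3 N:

COULOMBS_CONSTANT = 8.988e9        # N * m^2 * C^-2

force = COULOMBS_CONSTANT * 1e-6 * 1e-6 / 1**2
print(force)                       # ~8.988e-3 N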
'''simple docstring''' from __future__ import annotations import bisect def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' if hi < 0: __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) while lo < hi: __SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __SCREAMING_SNAKE_CASE = mid + 1 else: __SCREAMING_SNAKE_CASE = mid return lo def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' if hi < 0: __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) while lo < hi: __SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __SCREAMING_SNAKE_CASE = mid + 1 else: __SCREAMING_SNAKE_CASE = mid return lo def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1 while left <= right: __SCREAMING_SNAKE_CASE = left + (right - left) // 2 __SCREAMING_SNAKE_CASE = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __SCREAMING_SNAKE_CASE = midpoint - 1 else: __SCREAMING_SNAKE_CASE = midpoint + 1 return None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' __SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase ) if index != len(__UpperCAmelCase ) and sorted_collection[index] == item: return index return None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' if right < left: return None __SCREAMING_SNAKE_CASE = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 ) else: return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase ) if __name__ == "__main__": a = input("Enter numbers separated by comma:\n").strip() a = sorted(int(item) for item in user_input.split(",")) a = int(input("Enter a single number to be found in the list:\n")) a = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
13
1
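The hand-written bisect_left above agrees with the standard library on every sorted input, which makes a quick property check cheap. A sketch:

import bisect

def bisect_left_manual(sorted_collection, item):
    lo, hi = 0, len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo

data = [1, 3, 3, 3, 7, 9]
for item in (0, 3, 4, 9, 10):
    assert bisect_left_manual(data, item) == bisect.bisect_left(data, item)
print("manual bisect_left agrees with bisect.bisect_left")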
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path a = Path(__file__).resolve().parents[3] / "src" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) a = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"} a = "zero2" a = "zero3" a = [ZEROa, ZEROa] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = parameterized.to_safe_name("""_""".join(str(__UpperCAmelCase ) for x in param.args ) ) return f"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test a = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __a ( _snake_case ): @parameterized.expand(lowerCamelCase ,name_func=lowerCamelCase ) def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ): '''simple docstring''' self.run_and_check( stage=lowerCamelCase ,model=lowerCamelCase ,distributed=lowerCamelCase ,fpaa=lowerCamelCase ,) @require_torch_multi_gpu @parameterized.expand(lowerCamelCase ,name_func=lowerCamelCase ) def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ): '''simple docstring''' self.run_and_check( stage=lowerCamelCase ,model=lowerCamelCase ,distributed=lowerCamelCase ,fpaa=lowerCamelCase ,) @parameterized.expand(lowerCamelCase ,name_func=lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ): '''simple docstring''' self.run_and_check( stage=lowerCamelCase ,model=lowerCamelCase ,distributed=lowerCamelCase ,fpaa=lowerCamelCase ,) @require_torch_multi_gpu @parameterized.expand(lowerCamelCase ,name_func=lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ): '''simple docstring''' self.run_and_check( stage=lowerCamelCase ,model=lowerCamelCase ,distributed=lowerCamelCase ,fpaa=lowerCamelCase ,) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Dict ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ,lowerCamelCase : str ,lowerCamelCase : int = 10 ,lowerCamelCase : bool = True ,lowerCamelCase : bool = True ,lowerCamelCase : bool = True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = models[model] __SCREAMING_SNAKE_CASE = self.run_trainer( stage=lowerCamelCase ,model_name=lowerCamelCase ,eval_steps=lowerCamelCase ,num_train_epochs=1 ,distributed=lowerCamelCase ,fpaa=lowerCamelCase ,) self.do_checks(lowerCamelCase ) return output_dir def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : str ,lowerCamelCase : str ,lowerCamelCase : int = 10 ,lowerCamelCase : int = 1 
,lowerCamelCase : bool = True ,lowerCamelCase : bool = True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir("""./xxx""" ,after=lowerCamelCase ) __SCREAMING_SNAKE_CASE = f""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowerCamelCase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(["""--fp16"""] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __SCREAMING_SNAKE_CASE = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() __SCREAMING_SNAKE_CASE = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] __SCREAMING_SNAKE_CASE = self.get_launcher(lowerCamelCase ) __SCREAMING_SNAKE_CASE = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCamelCase ,env=self.get_env() ) return output_dir def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any]=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = min(2 ,get_gpu_count() ) if distributed else 1 return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
13
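The test matrix above is simply the cartesian product of ZeRO stages and model keys; each resulting tuple becomes one generated test method. The expansion, in isolation:

import itertools

stages = ["zero2", "zero3"]
models = ["base", "robust"]
print(list(itertools.product(stages, models)))
# [('zero2', 'base'), ('zero2', 'robust'), ('zero3', 'base'), ('zero3', 'robust')]
# e.g. the first tuple is turned into a test suffix like _zero2_base by the name_func.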
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging a = logging.get_logger(__name__) class __a ( _snake_case ): __UpperCamelCase : int = 'linear' __UpperCamelCase : Tuple = 'cosine' __UpperCamelCase : Tuple = 'cosine_with_restarts' __UpperCamelCase : List[Any] = 'polynomial' __UpperCamelCase : Optional[Any] = 'constant' __UpperCamelCase : Optional[int] = 'constant_with_warmup' __UpperCamelCase : List[Any] = 'piecewise_constant' def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) ) return 1.0 return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" ) __SCREAMING_SNAKE_CASE = int(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = float(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = value __SCREAMING_SNAKE_CASE = float(rule_list[-1] ) def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ): def rule_func(__UpperCAmelCase ) -> float: __SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__UpperCAmelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func __SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) 
) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" ) def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: __SCREAMING_SNAKE_CASE = lr_init - lr_end __SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps __SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps __SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) a = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
13
1
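Every schedule above is a LambdaLR factory: it returns a multiplier on the optimizer's base learning rate as a function of the step count. The linear warmup-then-decay lambda, evaluated by hand and detached from torch:

def linear_lambda(current_step, num_warmup_steps=10, num_training_steps=100):
    if current_step < num_warmup_steps:
        return current_step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - current_step) / max(1, num_training_steps - num_warmup_steps))

for step in (0, 5, 10, 55, 100):
    print(step, linear_lambda(step))
# 0 0.0, 5 0.5, 10 1.0, 55 0.5, 100 0.0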
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers a = [ "python", "tqdm", "regex", "requests", "packaging", "filelock", "numpy", "tokenizers", "huggingface-hub", "safetensors", "accelerate", "pyyaml", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]: '''simple docstring''' require_version(deps[pkg] , __UpperCAmelCase )
13
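require_version_core boils down to comparing an installed distribution's version against a pip-style specifier and raising with a maintainer hint on mismatch. The underlying check can be reproduced with packaging directly; a sketch, not transformers' exact code path:

from importlib.metadata import version
from packaging.specifiers import SpecifierSet

def require(pkg, spec):
    installed = version(pkg)
    if installed not in SpecifierSet(spec):
        raise ImportError(f"{pkg}{spec} is required, found {pkg}=={installed}")

require("packaging", ">=20.0")   # passes silently when the requirement is satisfied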
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
'''simple docstring''' a = [0, 2, 4, 6, 8] a = [1, 3, 5, 7, 9] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 __SCREAMING_SNAKE_CASE = 0 for digit in range(10 ): __SCREAMING_SNAKE_CASE = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , __UpperCAmelCase , __UpperCAmelCase ) return result __SCREAMING_SNAKE_CASE = 0 for digita in range(10 ): __SCREAMING_SNAKE_CASE = digita if (remainder + digita) % 2 == 0: __SCREAMING_SNAKE_CASE = ODD_DIGITS else: __SCREAMING_SNAKE_CASE = EVEN_DIGITS for digita in other_parity_digits: __SCREAMING_SNAKE_CASE = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , __UpperCAmelCase , __UpperCAmelCase , ) return result def __magic_name__ ( __UpperCAmelCase = 9 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(__UpperCAmelCase , 0 , [0] * length , __UpperCAmelCase ) return result if __name__ == "__main__": print(F'''{solution() = }''')
13
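The digit-DP solution above is easy to sanity-check by brute force for small limits: n is reversible when n has no trailing zero and every digit of n + reverse(n) is odd. Project Euler 145 states there are 120 such numbers below one thousand:

def is_reversible(n):
    if n % 10 == 0:                        # reverse(n) would have a leading zero
        return False
    total = n + int(str(n)[::-1])
    return all(int(d) % 2 == 1 for d in str(total))

print(sum(is_reversible(n) for n in range(1, 1000)))   # 120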
'''simple docstring''' import requests from bsa import BeautifulSoup def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" __SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" ) __SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)""" return soup.find("""div""" , class_=class_ ).find("""span""" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
13
1
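The scraper above hinges on a Yahoo-specific CSS class that changes often; the durable part is the fetch-parse-select pattern itself. A network-free sketch of the BeautifulSoup half, run against an inline HTML stand-in:

from bs4 import BeautifulSoup

html = '<div class="price"><span>189.84</span></div>'        # stand-in for the fetched page
soup = BeautifulSoup(html, "html.parser")
print(soup.find("div", class_="price").find("span").text)    # 189.84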
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} a = { "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json" ), }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt" ), }, } a = { "allenai/longformer-base-4096": 4096, "allenai/longformer-large-4096": 4096, "allenai/longformer-large-4096-finetuned-triviaqa": 4096, "allenai/longformer-base-4096-extra.pos.embd.only": 4096, "allenai/longformer-large-4096-extra.pos.embd.only": 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __magic_name__ ( ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) __SCREAMING_SNAKE_CASE = bs[:] __SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCAmelCase ) cs.append(2**8 + n ) n += 1 __SCREAMING_SNAKE_CASE = [chr(__UpperCAmelCase ) for n in cs] return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = set() __SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __SCREAMING_SNAKE_CASE = char return pairs class __a ( _snake_case ): __UpperCamelCase : Dict = VOCAB_FILES_NAMES __UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : List[str] = ['input_ids', 'attention_mask'] def __init__( self : Optional[int] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Tuple="replace" ,lowerCamelCase : Tuple="<s>" ,lowerCamelCase : List[Any]="</s>" ,lowerCamelCase : Optional[Any]="</s>" ,lowerCamelCase : Optional[int]="<s>" ,lowerCamelCase : Optional[int]="<unk>" ,lowerCamelCase : List[Any]="<pad>" ,lowerCamelCase : Dict="<mask>" ,lowerCamelCase : List[str]=False ,**lowerCamelCase : Any ,): 
'''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Run byte-pair encoding on a single pre-tokenized word, with caching."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (i.e. most frequent) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
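# A minimal usage sketch of the byte-level BPE machinery implemented above. It goes through
# `GPT2Tokenizer` from `transformers`, which exposes the same encode/decode interface; the
# "gpt2" checkpoint is only an illustrative choice.
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
ids = tokenizer("Hello world")["input_ids"]
print(ids)                    # token ids produced by byte-level BPE
print(tokenizer.decode(ids))  # round-trips back to "Hello world"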
13
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """
    Return True if ``num`` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(10)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
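# Tiny demonstration of the reversal check above (illustrative only):
print([n for n in range(150) if is_palindrome(n)][-5:])  # [101, 111, 121, 131, 141]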
13
1
'''simple docstring'''
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
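# A quick hedged example of running Flax ViT outside the test harness; the checkpoint name
# matches the one used in the slow test above.
import numpy as np
from transformers import FlaxViTModel

model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
outputs = model(np.ones((1, 3, 224, 224)))
print(outputs.last_hidden_state.shape)  # (1, 197, 768): 196 patches + 1 [CLS] token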
13
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination with partial pivoting."""
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [[round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the lowest-degree polynomial passing through the given points, as a callable."""
    size = len(y_points)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimal polynomials fitted to prefixes of the sequence."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
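# Example of the Gaussian-elimination helper above on a 2x2 system:
#   x + 2y = 5 and 3x + 4y = 11 give x = 1, y = 2.
print(solve([[1, 2], [3, 4]], [[5], [11]]))  # [[1.0], [2.0]]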
13
1
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold the frame axis out of the batch so attention runs over time
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
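# Hedged shape sketch for the temporal transformer above, using the public class from
# `diffusers` (assumed to mirror this implementation; the import path may differ between
# diffusers versions). The input layout is (batch * frames, channels, height, width).
import torch
from diffusers.models.transformer_temporal import TransformerTemporalModel

model = TransformerTemporalModel(
    num_attention_heads=2, attention_head_dim=8, in_channels=32, num_layers=1, norm_num_groups=8
)
x = torch.randn(2 * 4, 32, 8, 8)  # 2 clips of 4 frames each
out = model(x, num_frames=4).sample
print(out.shape)  # torch.Size([8, 32, 8, 8]); the residual path preserves the shape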
13
'''simple docstring'''
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
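# Hedged usage example for the pipeline above through the high-level `pipeline` factory;
# the CLIP checkpoint and image URL are only illustrative.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["a photo of a cat", "a photo of a dog"],
)
print(preds)  # list of {"score": ..., "label": ...} dicts, sorted by descending score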
13
1
'''simple docstring'''
def check_bouncy(n: int) -> bool:
    """
    Return True if the digits of ``n`` are neither monotonically increasing
    nor monotonically decreasing, i.e. the number is "bouncy".

    >>> check_bouncy(538)
    True
    >>> check_bouncy(134)
    False
    """
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """
    Return the least number for which the proportion of bouncy numbers
    first reaches exactly ``percent``.

    >>> solution(50)
    538
    """
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")

    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
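# Illustrative checks for the helpers above; 538 is the smallest number at which the bouncy
# proportion reaches 50% (Project Euler 112).
assert not check_bouncy(134468)  # non-decreasing digits -> not bouncy
assert not check_bouncy(66420)   # non-increasing digits -> not bouncy
assert check_bouncy(155349)      # neither -> bouncy
print(solution(50))              # 538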
13
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
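# The `require_version` helper used above can also be called directly; a small hedged
# example with the pin transformers itself uses for tqdm:
from transformers.utils.versions import require_version

require_version("tqdm>=4.27", "Try: pip install -U tqdm")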
13
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin a = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a = 250004 a = 250020 @require_sentencepiece @require_tokenizers class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Optional[int] = MBartaaTokenizer __UpperCamelCase : Optional[Any] = MBartaaTokenizerFast __UpperCamelCase : Optional[Any] = True __UpperCamelCase : Dict = True def UpperCAmelCase__ ( self : int ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = MBartaaTokenizer(lowerCamelCase ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """<s>""" __SCREAMING_SNAKE_CASE = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) ,lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) ,lowerCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,"""<s>""" ) self.assertEqual(vocab_keys[1] ,"""<pad>""" ) self.assertEqual(vocab_keys[-1] ,"""<mask>""" ) self.assertEqual(len(lowerCamelCase ) ,1054 ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,1054 ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = MBartaaTokenizer(lowerCamelCase ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ,keep_accents=lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCamelCase ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] ,) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase ) self.assertListEqual( lowerCamelCase ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCamelCase ) self.assertListEqual( lowerCamelCase ,[SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + 
"""is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] ,) @slow def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {"""input_ids""": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase ,model_name="""facebook/mbart-large-50""" ,revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" ,) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = 
tokenizer_r.save_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) __SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(lowerCamelCase ,lowerCamelCase ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=True __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase ,legacy_format=lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase ,lowerCamelCase ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) # Save tokenizer rust, legacy_format=False __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase ,legacy_format=lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way __SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) ) shutil.rmtree(lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class __a ( unittest.TestCase ): __UpperCamelCase : str = 'facebook/mbart-large-50-one-to-many-mmt' __UpperCamelCase : Optional[Any] = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] __UpperCamelCase : Union[str, Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] __UpperCamelCase : Tuple = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2] @classmethod def UpperCAmelCase__ ( cls : 
List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = MBartaaTokenizer.from_pretrained( cls.checkpoint_name ,src_lang="""en_XX""" ,tgt_lang="""ro_RO""" ) __SCREAMING_SNAKE_CASE = 1 return cls def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] ,25_0001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] ,25_0004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] ,25_0020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] ,25_0038 ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,lowerCamelCase ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' self.assertIn(lowerCamelCase ,self.tokenizer.all_special_ids ) __SCREAMING_SNAKE_CASE = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] __SCREAMING_SNAKE_CASE = self.tokenizer.decode(lowerCamelCase ,skip_special_tokens=lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase ,lowerCamelCase ) self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = 10 __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,max_length=lowerCamelCase ,truncation=lowerCamelCase ).input_ids[0] self.assertEqual(ids[0] ,lowerCamelCase ) self.assertEqual(ids[-1] ,2 ) self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[25_0053, 25_0001] ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = MBartaaTokenizer.from_pretrained(lowerCamelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase ) @require_torch def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase ,return_tensors="""pt""" ) __SCREAMING_SNAKE_CASE = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,) __SCREAMING_SNAKE_CASE = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase ,lowerCamelCase ) self.assertEqual((2, 14) ,batch.input_ids.shape ) self.assertEqual((2, 14) ,batch.attention_mask.shape ) __SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0] 
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase ) self.assertEqual(2 ,batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=3 ,return_tensors="""pt""" ) __SCREAMING_SNAKE_CASE = self.tokenizer( text_target=self.tgt_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=10 ,return_tensors="""pt""" ) __SCREAMING_SNAKE_CASE = targets["""input_ids"""] __SCREAMING_SNAKE_CASE = shift_tokens_right(lowerCamelCase ,self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.tokenizer._build_translation_inputs( """A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(lowerCamelCase ) ,{ # en_XX, A, test, EOS """input_ids""": [[25_0004, 62, 3034, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_0001, } ,)
13
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    BartForSequenceClassification,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    TapexTokenizer,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
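# A small hedged sketch of the tokenization step this script performs, using the public
# TapexTokenizer API; the checkpoint, table and query are only illustrative.
import pandas as pd
from transformers import TapexTokenizer

tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
table = pd.DataFrame.from_dict({"year": ["2008", "2012"], "city": ["Beijing", "London"]})
encoding = tokenizer(table=table, query="Beijing hosted the games in 2008", return_tensors="pt")
print(encoding.input_ids.shape)  # (1, sequence_length): the flattened table plus the statement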
13
1
'''simple docstring'''
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in ``n``."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
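# The reduce-based digit product above, shown on a short window (illustrative):
from functools import reduce

print(int(reduce(lambda x, y: str(int(x) * int(y)), "9989")))  # 9 * 9 * 8 * 9 = 5832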
"""ControlNet pipelines for Stable Diffusion."""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa: F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
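# Added usage sketch (not part of this __init__): loading a ControlNet
# checkpoint into the pipeline exported above. Model ids are illustrative.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)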
"""Convert a LoRA checkpoint in .safetensors format into a diffusers StableDiffusionPipeline."""
import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer by greedily re-joining name fragments until an
        # attribute lookup succeeds
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
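# Added numeric sketch (not part of the conversion script): the merge rule used
# above is W += alpha * (up @ down), shown here with hypothetical shapes
# (out_features=4, rank=2, in_features=3).
import torch

up = torch.randn(4, 2)      # lora_up weight
down = torch.randn(2, 3)    # lora_down weight
delta = torch.mm(up, down)  # (4, 3): same shape as the target linear weight
print(delta.shape)          # torch.Size([4, 3])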
"""Fetch the citation count of a paper from Google Scholar."""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation string scraped from the first Google Scholar result."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
"""
Prim's algorithm for the minimum spanning tree of an undirected weighted
graph, driven by a min priority queue that supports decrease-key updates.
"""
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # heap helper: position of the parent of the node at `position`
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap helper: position of the left child of the node at `position`
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap helper: position of the right child of the node at `position`
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue that keeps a map from element to heap position."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # move the minimum to the back, pop it, then restore the heap property
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # swap the element with its parent while it is lighter than the parent
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # swap the element with the lighter of its children while needed
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as a dict-of-dicts adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Return (dist, parent) maps describing a minimum spanning tree of `graph`."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
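# Added usage sketch (relies on the definitions restored in this file): build a
# weighted triangle and read off the MST parents.
if __name__ == "__main__":
    g = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, parent = prims_algo(g)
    print(parent)  # {'a': None, 'b': 'a', 'c': 'a'} -> MST edges a-b and a-c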
"""CamemBERT configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
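# Added illustrative check: instantiating the config above with its defaults
# reproduces the camembert-base dimensions.
if __name__ == "__main__":
    config = CamembertConfig()
    print(config.hidden_size, config.num_hidden_layers, config.vocab_size)  # 768 12 30522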
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __a : @staticmethod def UpperCAmelCase__ ( *lowerCamelCase : Optional[Any] ,**lowerCamelCase : str ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class __a ( unittest.TestCase ): __UpperCamelCase : Any = MODEL_FOR_OBJECT_DETECTION_MAPPING def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : str ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=lowerCamelCase ,image_processor=lowerCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ,threshold=0.0 ) self.assertGreater(len(lowerCamelCase ) ,0 ) for detected_object in outputs: self.assertEqual( lowerCamelCase ,{ """score""": ANY(lowerCamelCase ), """label""": ANY(lowerCamelCase ), """box""": {"""xmin""": ANY(lowerCamelCase ), """ymin""": ANY(lowerCamelCase ), """xmax""": ANY(lowerCamelCase ), """ymax""": ANY(lowerCamelCase )}, } ,) import datasets __SCREAMING_SNAKE_CASE = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" ,"""image""" ,split="""test""" ) __SCREAMING_SNAKE_CASE = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] __SCREAMING_SNAKE_CASE = object_detector(lowerCamelCase ,threshold=0.0 ) self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(lowerCamelCase ) ,0 ) for detected_object in outputs: self.assertEqual( lowerCamelCase ,{ """score""": ANY(lowerCamelCase ), """label""": ANY(lowerCamelCase ), """box""": {"""xmin""": ANY(lowerCamelCase ), """ymin""": ANY(lowerCamelCase ), """xmax""": ANY(lowerCamelCase ), """ymax""": ANY(lowerCamelCase )}, } ,) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' pass @require_torch def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """hf-internal-testing/tiny-detr-mobilenetsv3""" __SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=lowerCamelCase ,feature_extractor=lowerCamelCase ) __SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,threshold=0.0 ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=4 ) ,[ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, 
"""ymax""": 359}}, ] ,) __SCREAMING_SNAKE_CASE = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ,threshold=0.0 ,) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=4 ) ,[ [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] ,) @require_torch @slow def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50""" __SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=lowerCamelCase ,feature_extractor=lowerCamelCase ) __SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=4 ) ,[ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] ,) __SCREAMING_SNAKE_CASE = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=4 ) ,[ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, 
"""xmax""": 640, """ymax""": 368}}, ], ] ,) @require_torch @slow def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50""" __SCREAMING_SNAKE_CASE = pipeline("""object-detection""" ,model=lowerCamelCase ) __SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=4 ) ,[ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] ,) __SCREAMING_SNAKE_CASE = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=4 ) ,[ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] ,) @require_torch @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0.9_985 __SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50""" __SCREAMING_SNAKE_CASE = pipeline("""object-detection""" ,model=lowerCamelCase ) __SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,threshold=lowerCamelCase ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=4 ) ,[ {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] ,) @require_torch @require_pytesseract @slow def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """Narsil/layoutlmv3-finetuned-funsd""" __SCREAMING_SNAKE_CASE = 0.9_993 
__SCREAMING_SNAKE_CASE = pipeline("""object-detection""" ,model=lowerCamelCase ,threshold=lowerCamelCase ) __SCREAMING_SNAKE_CASE = object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=4 ) ,[ {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] ,)
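# Added standalone sketch of the pipeline exercised by the tests above; the
# model id and threshold mirror the slow tests (this triggers a model
# download). Output is a list of dicts with "score", "label" and "box" keys.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
outputs = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9985)
print(outputs[0]["label"], outputs[0]["box"])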
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __a ( unittest.TestCase ): def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 __SCREAMING_SNAKE_CASE = num_patches + 1 def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = ViTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,) return config, pixel_values def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) __SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size) __SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.type_sequence_label_size __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 
self.type_sequence_label_size) ) # test greyscale images __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase ) __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxViTModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(lowerCamelCase ) __SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = model_class(lowerCamelCase ) @jax.jit def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ): return model(pixel_values=lowerCamelCase ,**lowerCamelCase ) with self.subTest("""JIT Enabled""" ): __SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ): self.assertEqual(jitted_output.shape ,output.shape ) @slow def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase )
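# Added standalone sketch mirroring the slow test above: load the pretrained
# Flax ViT and run a dummy all-ones batch through it (requires a download).
import numpy as np
from transformers import FlaxViTModel

model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
outputs = model(np.ones((1, 3, 224, 224)))
print(outputs.last_hidden_state.shape)  # (1, 197, 768): 14 * 14 patches + [CLS]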
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset a = random.Random() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[int]: '''simple docstring''' if rng is None: __SCREAMING_SNAKE_CASE = global_rng __SCREAMING_SNAKE_CASE = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __a ( unittest.TestCase ): def __init__( self : List[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any]=7 ,lowerCamelCase : Union[str, Any]=400 ,lowerCamelCase : Any=2000 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : Any=128 ,lowerCamelCase : List[Any]=1 ,lowerCamelCase : str=512 ,lowerCamelCase : Any=30 ,lowerCamelCase : List[str]=4_4100 ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = min_seq_length __SCREAMING_SNAKE_CASE = max_seq_length __SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = feature_size __SCREAMING_SNAKE_CASE = num_audio_channels __SCREAMING_SNAKE_CASE = hop_length __SCREAMING_SNAKE_CASE = chunk_length __SCREAMING_SNAKE_CASE = sampling_rate def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def UpperCAmelCase__ ( self : str ,lowerCamelCase : str=False ,lowerCamelCase : Any=False ): '''simple docstring''' def _flatten(lowerCamelCase : Optional[int] ): return list(itertools.chain(*lowerCamelCase ) ) if equal_length: __SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __SCREAMING_SNAKE_CASE = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: __SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Any = TvltFeatureExtractor def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""feature_size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""sampling_rate""" ) ) def 
UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase )[0] check_json_file_has_correct_format(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() __SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() __SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) __SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase ,lowerCamelCase ) ) self.assertEqual(lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase ) __SCREAMING_SNAKE_CASE = feat_extract_first.to_dict() __SCREAMING_SNAKE_CASE = feat_extract_second.to_dict() __SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" ) __SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase ,lowerCamelCase ) ) self.assertEqual(lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 __SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] __SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs] # Test not batched input __SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched __SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase ,return_tensors="""np""" ,sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking __SCREAMING_SNAKE_CASE = feature_extractor( lowerCamelCase ,return_tensors="""np""" ,sampling_rate=4_4100 ,mask_audio=lowerCamelCase ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)] __SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase ) __SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase ,return_tensors="""np""" ,sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech __SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self._load_datasamples(1 ) __SCREAMING_SNAKE_CASE = TvltFeatureExtractor() __SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase ,return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 192, 128) ) __SCREAMING_SNAKE_CASE = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase ,atol=1E-4 ) )
"""SEW model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
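# Added illustrative check of the stride-product property above: the default
# conv strides multiply to 5 * 2**6 = 320, i.e. the feature encoder emits one
# frame per 320 input samples.
if __name__ == "__main__":
    import functools
    import operator

    print(functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1))  # 320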
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : int = KandinskyVaaInpaintPipeline __UpperCamelCase : List[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image'] __UpperCamelCase : Any = [ 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] __UpperCamelCase : List[str] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __UpperCamelCase : Optional[int] = False @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return 32 @property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return self.time_input_dim @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return 100 @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __SCREAMING_SNAKE_CASE = UNetaDConditionModel(**lowerCamelCase ) return model @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.dummy_unet __SCREAMING_SNAKE_CASE = self.dummy_movq __SCREAMING_SNAKE_CASE = DDIMScheduler( num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00_085 ,beta_end=0.012 ,clip_sample=lowerCamelCase ,set_alpha_to_one=lowerCamelCase ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=lowerCamelCase ,) __SCREAMING_SNAKE_CASE = { """unet""": unet, 
"""scheduler""": scheduler, """movq""": movq, } return components def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple=0 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) __SCREAMING_SNAKE_CASE = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( lowerCamelCase ) # create init_image __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase ) __SCREAMING_SNAKE_CASE = image.cpu().permute(0 ,2 ,3 ,1 )[0] __SCREAMING_SNAKE_CASE = Image.fromarray(np.uinta(lowerCamelCase ) ).convert("""RGB""" ).resize((256, 256) ) # create mask __SCREAMING_SNAKE_CASE = np.ones((64, 64) ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE = 0 if str(lowerCamelCase ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase ) __SCREAMING_SNAKE_CASE = { """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """cpu""" __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCamelCase ) __SCREAMING_SNAKE_CASE = pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(lowerCamelCase ) ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = pipe( **self.get_dummy_inputs(lowerCamelCase ) ,return_dict=lowerCamelCase ,)[0] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) __SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __SCREAMING_SNAKE_CASE = np.ones((768, 768) ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = """a hat""" __SCREAMING_SNAKE_CASE = KandinskyVaaPriorPipeline.from_pretrained( 
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase ) __SCREAMING_SNAKE_CASE = KandinskyVaaInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder-inpaint""" ,torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE = pipeline.to(lowerCamelCase ) pipeline.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pipe_prior( lowerCamelCase ,generator=lowerCamelCase ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() __SCREAMING_SNAKE_CASE = pipeline( image=lowerCamelCase ,mask_image=lowerCamelCase ,image_embeds=lowerCamelCase ,negative_image_embeds=lowerCamelCase ,generator=lowerCamelCase ,num_inference_steps=100 ,height=768 ,width=768 ,output_type="""np""" ,) __SCREAMING_SNAKE_CASE = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCamelCase ,lowerCamelCase )
"""
Find the denominator d <= `digit` for which 1/d has the longest recurring
cycle in its decimal fraction part (Project Euler Problem 26), by tracking
when the long-division remainders start repeating.
"""


def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
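# Added worked example of the remainder-tracking idea above: for 1/7 the
# remainders repeat after six steps, so its recurring cycle has length 6
# (1/7 = 0.(142857)).
remainders = []
now_divide = 1
while now_divide not in remainders:
    remainders.append(now_divide)
    now_divide = now_divide * 10 % 7
print(len(remainders))  # 6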
"""Bitwise AND of two non-negative integers, computed on their binary strings."""


def binary_and(a: int, b: int) -> str:
    """
    Return the result of a binary AND on two non-negative integers, as a
    binary string prefixed with "0b".

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
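# Added cross-check sketch: parsing the string result back to an int must
# agree with Python's built-in bitwise AND.
assert int(binary_and(37, 50), 2) == (37 & 50)  # both are 32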
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __a ( unittest.TestCase ): def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224} __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_convert_rgb def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __SCREAMING_SNAKE_CASE = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) ) else: __SCREAMING_SNAKE_CASE = [] for i in range(self.batch_size ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 ) image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs] if torchify: __SCREAMING_SNAKE_CASE = [torch.from_numpy(lowerCamelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase ) @property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return 
self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' pass def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase ) __SCREAMING_SNAKE_CASE = 3 @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
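# A minimal sketch (not part of the test suite) of the shape contract the
# tests above assert; it assumes `transformers` and `Pillow` are installed
# and reuses the 18x18 crop size from the tester defaults.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor(do_center_crop=True, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8))
outputs = processor(image, return_tensors="pt")
print(outputs.pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])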
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) __SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) sd_pipe.set_scheduler("""sample_euler""" ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sd_pipe([prompt] ,generator=lowerCamelCase ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type="""np""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) __SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) sd_pipe.set_scheduler("""sample_euler""" ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sd_pipe([prompt] ,generator=lowerCamelCase ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type="""np""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1 def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) __SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase ) sd_pipe.set_scheduler("""sample_dpmpp_2m""" ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sd_pipe( [prompt] ,generator=lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=15 ,output_type="""np""" ,use_karras_sigmas=lowerCamelCase ,) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array( [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
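# Standalone usage sketch of the pipeline exercised above; it mirrors the
# third test and assumes a CUDA device plus the `k-diffusion` package.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to("cuda")
pipe.set_scheduler("sample_dpmpp_2m")  # any sampler name exposed by k-diffusion

generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    guidance_scale=7.5,
    num_inference_steps=15,
    use_karras_sigmas=True,
).images[0]
image.save("squirrel.png")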
'''simple docstring'''

import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if num_final_examples != num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
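# Example benchmark usage of generate_example_dataset (the feature spec,
# sequence shape, and temp path are illustrative).
import os
import tempfile

features = datasets.Features(
    {"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))}
)
with tempfile.TemporaryDirectory() as tmp_dir:
    dataset = generate_example_dataset(
        os.path.join(tmp_dir, "dummy.arrow"), features, num_examples=10, seq_shapes={"vec": (4,)}
    )
    print(dataset)  # Dataset({features: ['text', 'vec'], num_rows: 10})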
'''simple docstring''' from ..utils import DummyObject, requires_backends class __a ( metaclass=_snake_case ): __UpperCamelCase : List[str] = ['sentencepiece'] def __init__( self : int ,*lowerCamelCase : Tuple ,**lowerCamelCase : Dict ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Union[str, Any] = ['sentencepiece'] def __init__( self : Dict ,*lowerCamelCase : List[str] ,**lowerCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Optional[int] = ['sentencepiece'] def __init__( self : Tuple ,*lowerCamelCase : str ,**lowerCamelCase : Any ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[Any] = ['sentencepiece'] def __init__( self : Any ,*lowerCamelCase : Optional[int] ,**lowerCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[str] = ['sentencepiece'] def __init__( self : Dict ,*lowerCamelCase : Union[str, Any] ,**lowerCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Optional[int] = ['sentencepiece'] def __init__( self : Optional[Any] ,*lowerCamelCase : int ,**lowerCamelCase : Optional[int] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[Any] = ['sentencepiece'] def __init__( self : List[str] ,*lowerCamelCase : Optional[int] ,**lowerCamelCase : str ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Optional[Any] = ['sentencepiece'] def __init__( self : Optional[Any] ,*lowerCamelCase : Optional[int] ,**lowerCamelCase : Optional[int] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Any = ['sentencepiece'] def __init__( self : Optional[int] ,*lowerCamelCase : Optional[Any] ,**lowerCamelCase : Tuple ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[str] = ['sentencepiece'] def __init__( self : int ,*lowerCamelCase : Union[str, Any] ,**lowerCamelCase : Dict ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : int = ['sentencepiece'] def __init__( self : Tuple ,*lowerCamelCase : Optional[int] ,**lowerCamelCase : List[Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Optional[int] = ['sentencepiece'] def __init__( self : str ,*lowerCamelCase : Optional[Any] ,**lowerCamelCase : List[str] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Any = ['sentencepiece'] def __init__( self : Union[str, Any] ,*lowerCamelCase : Tuple ,**lowerCamelCase : List[str] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Tuple = ['sentencepiece'] def __init__( self : Dict ,*lowerCamelCase : List[str] ,**lowerCamelCase : int ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) 
class __a ( metaclass=_snake_case ): __UpperCamelCase : Tuple = ['sentencepiece'] def __init__( self : int ,*lowerCamelCase : Any ,**lowerCamelCase : List[Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Union[str, Any] = ['sentencepiece'] def __init__( self : Dict ,*lowerCamelCase : Any ,**lowerCamelCase : int ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Any = ['sentencepiece'] def __init__( self : Dict ,*lowerCamelCase : str ,**lowerCamelCase : Optional[Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Any = ['sentencepiece'] def __init__( self : Tuple ,*lowerCamelCase : str ,**lowerCamelCase : str ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : int = ['sentencepiece'] def __init__( self : str ,*lowerCamelCase : str ,**lowerCamelCase : List[Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[Any] = ['sentencepiece'] def __init__( self : List[str] ,*lowerCamelCase : Optional[Any] ,**lowerCamelCase : List[Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : str = ['sentencepiece'] def __init__( self : Optional[int] ,*lowerCamelCase : Tuple ,**lowerCamelCase : List[str] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[Any] = ['sentencepiece'] def __init__( self : Any ,*lowerCamelCase : Optional[int] ,**lowerCamelCase : str ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[str] = ['sentencepiece'] def __init__( self : str ,*lowerCamelCase : Dict ,**lowerCamelCase : Dict ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Dict = ['sentencepiece'] def __init__( self : Any ,*lowerCamelCase : List[str] ,**lowerCamelCase : Tuple ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : str = ['sentencepiece'] def __init__( self : int ,*lowerCamelCase : Tuple ,**lowerCamelCase : Tuple ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Optional[Any] = ['sentencepiece'] def __init__( self : int ,*lowerCamelCase : Dict ,**lowerCamelCase : str ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[str] = ['sentencepiece'] def __init__( self : Tuple ,*lowerCamelCase : Union[str, Any] ,**lowerCamelCase : Union[str, Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : List[Any] = ['sentencepiece'] def __init__( self : str ,*lowerCamelCase : int ,**lowerCamelCase : List[str] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Tuple = ['sentencepiece'] def __init__( self : List[Any] ,*lowerCamelCase : Dict ,**lowerCamelCase : List[Any] ): '''simple docstring''' 
requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Union[str, Any] = ['sentencepiece'] def __init__( self : Union[str, Any] ,*lowerCamelCase : List[str] ,**lowerCamelCase : Any ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] ) class __a ( metaclass=_snake_case ): __UpperCamelCase : Dict = ['sentencepiece'] def __init__( self : Tuple ,*lowerCamelCase : List[Any] ,**lowerCamelCase : List[Any] ): '''simple docstring''' requires_backends(self ,["""sentencepiece"""] )
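# Context for the block above: this is the auto-generated dummy-object file
# transformers ships so imports succeed without `sentencepiece`. Each
# obfuscated class attribute stands in for `_backends`, and instantiating
# any class fails with an actionable ImportError. A minimal sketch of the
# pattern (the class name here is illustrative):
from transformers.utils import DummyObject, requires_backends


class SomeSentencePieceTokenizer(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


# SomeSentencePieceTokenizer() raises ImportError unless sentencepiece is installed.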
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a = "__DUMMY_TRANSFORMERS_USER__" a = "Dummy User" a = "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" a = "https://hub-ci.huggingface.co" a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}" a = Path("~/.huggingface/hub_ci_token").expanduser() @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(__UpperCAmelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __magic_name__ ( ) -> Optional[Any]: '''simple docstring''' return HfApi(endpoint=__UpperCAmelCase ) @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = HfFolder.get_token() HfFolder.save_token(__UpperCAmelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Dict: '''simple docstring''' def _cleanup_repo(__UpperCAmelCase ): hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' @contextmanager def _temporary_repo(__UpperCAmelCase ): try: yield repo_id finally: cleanup_repo(__UpperCAmelCase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , 
private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
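# Hedged sketch of a test consuming the fixtures above. The fixture names
# follow the deobfuscated upstream conftest and should be treated as
# assumptions here.
import datasets


def test_load_private_text_repo(hf_private_dataset_repo_txt_data, hf_token):
    # the fixture yields a repo_id such as "__DUMMY_TRANSFORMERS_USER__/repo_txt_data-..."
    ds = datasets.load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
    assert "train" in ds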
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") a = logging.getLogger(__name__) @dataclass class __a : __UpperCamelCase : Optional[str] = field( default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) __UpperCamelCase : Optional[str] = field( default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, ) __UpperCamelCase : int = field( default=1024, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} ) __UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __a : __UpperCamelCase : str = field( default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, ) __UpperCamelCase : str = field( default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) }, ) def __magic_name__ ( ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __SCREAMING_SNAKE_CASE = training_args.get_process_log_level() logger.setLevel(__UpperCAmelCase ) datasets.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1] __SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __SCREAMING_SNAKE_CASE = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __SCREAMING_SNAKE_CASE = False # Some models have set the order of the labels to use, so let's make sure we do use it. __SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1} __SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__UpperCAmelCase ): # Tokenize the texts def _convert_table_text_to_pandas(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __SCREAMING_SNAKE_CASE = examples["""statement"""] __SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __SCREAMING_SNAKE_CASE = raw_datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""train"""] if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and 
"test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions __SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = default_data_collator elif training_args.fpaa: __SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 ) else: __SCREAMING_SNAKE_CASE = None # Initialize our Trainer __SCREAMING_SNAKE_CASE = Trainer( model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE = last_checkpoint __SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = train_result.metrics __SCREAMING_SNAKE_CASE = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __UpperCAmelCase ) trainer.save_metrics("""train""" , __UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.log_metrics("""eval""" , __UpperCAmelCase ) trainer.save_metrics("""eval""" , __UpperCAmelCase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" ) __SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions __SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 ) __SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(__UpperCAmelCase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = label_list[item] writer.write(f"""{index}\t{item}\n""" ) __SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**__UpperCAmelCase ) else: trainer.create_model_card(**__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> Any: '''simple docstring''' main() if __name__ == "__main__": main()
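# The core preprocessing step above turns each `table_text` blob into a
# pandas DataFrame and lets TapexTokenizer linearize it together with the
# statement. A standalone sketch (model id and data are illustrative):
import pandas as pd
from transformers import TapexTokenizer

tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
table = pd.DataFrame.from_records([["beijing", "21540000"]], columns=["city", "population"])
encoding = tokenizer(table, "beijing has a population over twenty million", return_tensors="pt")
print(encoding.input_ids.shape)  # (1, sequence_length)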
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a = logging.get_logger(__name__) a = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : Dict = 'deta' __UpperCamelCase : List[str] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,): '''simple docstring''' if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) __SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" ) __SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] __SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase ) __SCREAMING_SNAKE_CASE = backbone_config __SCREAMING_SNAKE_CASE = num_queries __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = init_xavier_std __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = auxiliary_loss __SCREAMING_SNAKE_CASE = position_embedding_type # deformable attributes __SCREAMING_SNAKE_CASE = num_feature_levels __SCREAMING_SNAKE_CASE = encoder_n_points __SCREAMING_SNAKE_CASE = decoder_n_points __SCREAMING_SNAKE_CASE = two_stage __SCREAMING_SNAKE_CASE = two_stage_num_proposals __SCREAMING_SNAKE_CASE = with_box_refine __SCREAMING_SNAKE_CASE = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __SCREAMING_SNAKE_CASE = class_cost __SCREAMING_SNAKE_CASE = bbox_cost __SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients __SCREAMING_SNAKE_CASE = mask_loss_coefficient 
__SCREAMING_SNAKE_CASE = dice_loss_coefficient __SCREAMING_SNAKE_CASE = bbox_loss_coefficient __SCREAMING_SNAKE_CASE = giou_loss_coefficient __SCREAMING_SNAKE_CASE = eos_coefficient __SCREAMING_SNAKE_CASE = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase ) @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return self.encoder_attention_heads @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self.d_model def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() __SCREAMING_SNAKE_CASE = self.__class__.model_type return output
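# Minimal sketch of instantiating the config above. Note the guard in
# __init__: two_stage=True requires with_box_refine=True.
from transformers import DetaConfig

config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
# attribute_map plus the two properties alias hidden_size and
# num_attention_heads onto d_model and encoder_attention_heads:
print(config.hidden_size, config.num_attention_heads)  # 256 8 with the defaults above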
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a = logging.get_logger(__name__) a = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } a = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = {} with open(__UpperCAmelCase , """r""" ) as file: for line_number, line in enumerate(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = line.strip() if line: __SCREAMING_SNAKE_CASE = line.split() __SCREAMING_SNAKE_CASE = line_number __SCREAMING_SNAKE_CASE = words[0] __SCREAMING_SNAKE_CASE = value return result def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' for attribute in key.split(""".""" ): __SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split(""".""" )[-1]] __SCREAMING_SNAKE_CASE = """param""" if weight_type is not None and weight_type != "param": __SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape elif weight_type is not None and weight_type == "param": __SCREAMING_SNAKE_CASE = hf_pointer for attribute in hf_param_name.split(""".""" ): __SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = shape_pointer.shape # let's reduce dimension __SCREAMING_SNAKE_CASE = value[0] else: __SCREAMING_SNAKE_CASE = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __SCREAMING_SNAKE_CASE = value elif weight_type == "weight_g": __SCREAMING_SNAKE_CASE = value elif weight_type == "weight_v": __SCREAMING_SNAKE_CASE = value elif weight_type == "bias": __SCREAMING_SNAKE_CASE = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): __SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = value else: __SCREAMING_SNAKE_CASE = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split(""".""" )[-1]] __SCREAMING_SNAKE_CASE = """param""" if weight_type is not None and weight_type != "param": __SCREAMING_SNAKE_CASE = """.""".join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __SCREAMING_SNAKE_CASE = """.""".join([key, hf_param_name] ) else: __SCREAMING_SNAKE_CASE = key __SCREAMING_SNAKE_CASE = value if """lm_head""" in full_key else value[0] a = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = False for key, mapped_key in MAPPING.items(): __SCREAMING_SNAKE_CASE = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __SCREAMING_SNAKE_CASE = True if "*" in mapped_key: __SCREAMING_SNAKE_CASE = name.split(__UpperCAmelCase )[0].split(""".""" )[-2] __SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , __UpperCAmelCase ) if "weight_g" in name: __SCREAMING_SNAKE_CASE = """weight_g""" elif "weight_v" in name: __SCREAMING_SNAKE_CASE = """weight_v""" elif "bias" in name: __SCREAMING_SNAKE_CASE = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __SCREAMING_SNAKE_CASE = """weight""" else: __SCREAMING_SNAKE_CASE = None if hf_dict is not None: rename_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return is_used return is_used def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = fairseq_model.state_dict() __SCREAMING_SNAKE_CASE = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __SCREAMING_SNAKE_CASE = False if "conv_layers" in name: load_conv_layer( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , ) __SCREAMING_SNAKE_CASE = True else: __SCREAMING_SNAKE_CASE = load_wavaveca_layer(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if not is_used: unused_weights.append(__UpperCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __magic_name__ ( __UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1] __SCREAMING_SNAKE_CASE = name.split(""".""" ) __SCREAMING_SNAKE_CASE = int(items[0] ) __SCREAMING_SNAKE_CASE = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __SCREAMING_SNAKE_CASE = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __SCREAMING_SNAKE_CASE = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __SCREAMING_SNAKE_CASE = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __SCREAMING_SNAKE_CASE = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__UpperCAmelCase ) @torch.no_grad() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False ) -> List[Any]: '''simple docstring''' if config_path is not None: __SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = WavaVecaConfig() if is_seq_class: __SCREAMING_SNAKE_CASE = read_txt_into_dict(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ) feature_extractor.save_pretrained(__UpperCAmelCase ) elif is_finetuned: if dict_path: __SCREAMING_SNAKE_CASE = Dictionary.load(__UpperCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __SCREAMING_SNAKE_CASE = target_dict.pad_index __SCREAMING_SNAKE_CASE = target_dict.bos_index __SCREAMING_SNAKE_CASE = target_dict.eos_index __SCREAMING_SNAKE_CASE = len(target_dict.symbols ) __SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """vocab.json""" ) if not os.path.isdir(__UpperCAmelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__UpperCAmelCase ) ) return os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = target_dict.indices # fairseq has the <pad> and <s> switched 
__SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer( __UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) processor.save_pretrained(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = WavaVecaForCTC(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(__UpperCAmelCase ) if is_finetuned or is_seq_class: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __SCREAMING_SNAKE_CASE = argparse.Namespace(task="""audio_pretraining""" ) __SCREAMING_SNAKE_CASE = fairseq.tasks.setup_task(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = model[0].eval() recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , not is_finetuned ) hf_wavavec.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) a = parser.parse_args() a = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
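# Hedged sketch: invoking the converter above directly instead of through
# argparse. The paths are placeholders; the positional argument order
# follows the __main__ block.
convert_wavaveca_checkpoint(
    "wav2vec_small.pt",           # fairseq checkpoint path
    "./wav2vec2-base-converted",  # output folder for the HF model
    None,                         # config_path (use the default config)
    None,                         # dict_path (only needed when fine-tuned)
    False,                        # is_finetuned: a pretraining checkpoint here
    False,                        # is_seq_class
)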
13
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : List[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape __SCREAMING_SNAKE_CASE = jax.image.resize( lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,) __SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : List[str] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : int = None __UpperCamelCase : float = 0.0 __UpperCamelCase : bool = None __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels __SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype ) __SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob ) __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __SCREAMING_SNAKE_CASE = None if use_nin_shortcut: __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,) def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ): '''simple docstring''' __SCREAMING_SNAKE_CASE = hidden_states __SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) ) __SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 ) __SCREAMING_SNAKE_CASE = hidden_states + temb __SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase ) if self.conv_shortcut is not None: __SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase ) return hidden_states + residual
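# Minimal shape check for the nearest-neighbor upsampling used above, assuming
# NHWC inputs as in the upsample module; the downsample module instead halves
# the spatial dimensions with a strided convolution.
import jax
import jax.numpy as jnp

x = jnp.zeros((1, 8, 8, 3))
y = jax.image.resize(x, shape=(1, 16, 16, 3), method="nearest")
print(y.shape)  # (1, 16, 16, 3)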
13
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a ( unittest.TestCase ): @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,) return model def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.dummy_uncond_unet __SCREAMING_SNAKE_CASE = ScoreSdeVeScheduler() __SCREAMING_SNAKE_CASE = ScoreSdeVePipeline(unet=lowerCamelCase ,scheduler=lowerCamelCase ) sde_ve.to(lowerCamelCase ) sde_ve.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=lowerCamelCase ).images __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=lowerCamelCase ,return_dict=lowerCamelCase )[ 0 ] __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """google/ncsnpp-church-256""" __SCREAMING_SNAKE_CASE = UNetaDModel.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = ScoreSdeVeScheduler.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = ScoreSdeVePipeline(unet=lowerCamelCase ,scheduler=lowerCamelCase ) sde_ve.to(lowerCamelCase ) sde_ve.set_progress_bar_config(disable=lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = sde_ve(num_inference_steps=10 ,output_type="""numpy""" ,generator=lowerCamelCase ).images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) __SCREAMING_SNAKE_CASE = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
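# The assertions above compare a 3x3 corner of the last channel against a
# reference slice; a minimal illustration of that indexing on a dummy batch.
import numpy as np

image = np.zeros((1, 32, 32, 3))
image_slice = image[0, -3:, -3:, -1]
print(image_slice.shape)  # (3, 3)
print(np.abs(image_slice.flatten() - np.zeros(9)).max() < 1e-2)  # True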
13
'''simple docstring''' import sys from collections import defaultdict class __a : def __init__( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ): '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pos def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: __SCREAMING_SNAKE_CASE = 2 * start + 2 if heap[smallest_child] < heap[start]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( heap[start], positions[start], ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa __SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,lowerCamelCase ) self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = position[index] while index != 0: __SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __SCREAMING_SNAKE_CASE = heap[parent] __SCREAMING_SNAKE_CASE = position[parent] self.set_position(position[parent] ,lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,lowerCamelCase ) break __SCREAMING_SNAKE_CASE = parent else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,0 ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1 for i in range(lowerCamelCase ,-1 ,-1 ): self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = positions[0] __SCREAMING_SNAKE_CASE = sys.maxsize self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase ) return temp def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = Heap() __SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex __SCREAMING_SNAKE_CASE = [] for vertex in range(len(__UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__UpperCAmelCase ) heap.node_position.append(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = sys.maxsize for neighbor, distance in adjacency_list[0]: __SCREAMING_SNAKE_CASE = 0 
__SCREAMING_SNAKE_CASE = distance heap.heapify(__UpperCAmelCase , __UpperCAmelCase ) for _ in range(1 , len(__UpperCAmelCase ) ): __SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __SCREAMING_SNAKE_CASE = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__UpperCAmelCase )] ): __SCREAMING_SNAKE_CASE = distance heap.bottom_to_top( __UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > a = int(input("Enter number of edges: ").strip()) a = defaultdict(list) for _ in range(edges_number): a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
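# An equivalent minimum-spanning-tree computation built on the standard-library
# heapq, shown only for comparison with the custom Heap class above; the
# adjacency-list format is the same, and the toy graph is illustrative.
import heapq
from collections import defaultdict

def prim_mst(adjacency, start=0):
    visited = {start}
    frontier = [(weight, start, v) for v, weight in adjacency[start]]
    heapq.heapify(frontier)
    mst = []
    while frontier:
        weight, u, v = heapq.heappop(frontier)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v, weight))
        for nxt, w in adjacency[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (w, v, nxt))
    return mst

adjacency = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 7), (2, 3, 3)]:
    adjacency[u].append((v, w))
    adjacency[v].append((u, w))
print(prim_mst(adjacency))  # [(0, 1, 1), (1, 2, 2), (2, 3, 3)]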
13
1
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a = "3" print("Python version:", sys.version) print("OS platform:", platform.platform()) print("OS architecture:", platform.machine()) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) except ImportError: print("Torch version:", None) try: import transformers print("transformers version:", transformers.__version__) except ImportError: print("transformers version:", None)
13
'''simple docstring''' import os import string import sys a = 1 << 8 a = { "tab": ord("\t"), "newline": ord("\r"), "esc": 27, "up": 65 + ARROW_KEY_FLAG, "down": 66 + ARROW_KEY_FLAG, "right": 67 + ARROW_KEY_FLAG, "left": 68 + ARROW_KEY_FLAG, "mod_int": 91, "undefined": sys.maxsize, "interrupt": 3, "insert": 50, "delete": 51, "pg_up": 53, "pg_down": 54, } a = KEYMAP["up"] a = KEYMAP["left"] if sys.platform == "win32": a = [] a = { b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG, b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG, b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG, b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG, b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG, b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG, b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG, b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG, } for i in range(10): a = ord(str(i)) def __magic_name__ ( ) -> Union[str, Any]: '''simple docstring''' if os.name == "nt": import msvcrt __SCREAMING_SNAKE_CASE = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(__UpperCAmelCase ) == 0: # Read the keystroke __SCREAMING_SNAKE_CASE = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): __SCREAMING_SNAKE_CASE = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: __SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(__UpperCAmelCase ) if ord(__UpperCAmelCase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) __SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] ) except KeyError: __SCREAMING_SNAKE_CASE = cha[1] else: __SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty __SCREAMING_SNAKE_CASE = sys.stdin.fileno() __SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase ) try: tty.setraw(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = sys.stdin.read(1 ) finally: termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase ) return ch def __magic_name__ ( ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(__UpperCAmelCase ) == KEYMAP["esc"]: __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]: __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a = { "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "GraphormerForGraphClassification", "GraphormerModel", "GraphormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
'''simple docstring''' from __future__ import annotations import bisect def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' if hi < 0: __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) while lo < hi: __SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __SCREAMING_SNAKE_CASE = mid + 1 else: __SCREAMING_SNAKE_CASE = mid return lo def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' if hi < 0: __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) while lo < hi: __SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __SCREAMING_SNAKE_CASE = mid + 1 else: __SCREAMING_SNAKE_CASE = mid return lo def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1 while left <= right: __SCREAMING_SNAKE_CASE = left + (right - left) // 2 __SCREAMING_SNAKE_CASE = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __SCREAMING_SNAKE_CASE = midpoint - 1 else: __SCREAMING_SNAKE_CASE = midpoint + 1 return None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' __SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase ) if index != len(__UpperCAmelCase ) and sorted_collection[index] == item: return index return None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' if right < left: return None __SCREAMING_SNAKE_CASE = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 ) else: return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase ) if __name__ == "__main__": a = input("Enter numbers separated by comma:\n").strip() a = sorted(int(item) for item in user_input.split(",")) a = int(input("Enter a single number to be found in the list:\n")) a = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
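# The left/right variants above differ only in how ties are broken; the
# standard-library bisect module shows the same contrast on duplicates.
import bisect

data = [1, 2, 2, 2, 3]
print(bisect.bisect_left(data, 2))   # 1 -> first index where 2 could be inserted
print(bisect.bisect_right(data, 2))  # 4 -> index just past the last 2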
13
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
13
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging a = logging.get_logger(__name__) class __a ( _snake_case ): __UpperCamelCase : int = 'linear' __UpperCamelCase : Tuple = 'cosine' __UpperCamelCase : Tuple = 'cosine_with_restarts' __UpperCamelCase : List[Any] = 'polynomial' __UpperCamelCase : Optional[Any] = 'constant' __UpperCamelCase : Optional[int] = 'constant_with_warmup' __UpperCamelCase : List[Any] = 'piecewise_constant' def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) ) return 1.0 return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" ) __SCREAMING_SNAKE_CASE = int(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = float(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = value __SCREAMING_SNAKE_CASE = float(rule_list[-1] ) def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ): def rule_func(__UpperCAmelCase ) -> float: __SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__UpperCAmelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func __SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) 
) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" ) def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: __SCREAMING_SNAKE_CASE = lr_init - lr_end __SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps __SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps __SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) a = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
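# A minimal sketch of the linear warmup-then-decay schedule implemented above,
# built directly on torch's LambdaLR; the step counts and learning rate are
# illustrative values, not defaults from this module.
import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
num_warmup_steps, num_training_steps = 10, 100

def lr_lambda(step):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))

scheduler = LambdaLR(optimizer, lr_lambda)
for _ in range(20):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # ~[0.000889] = 1e-3 * (100 - 20) / 90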
13
1
'''simple docstring''' import requests from bsa import BeautifulSoup def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase , params=__UpperCAmelCase ).content , """html.parser""" ) __SCREAMING_SNAKE_CASE = soup.find("""div""" , attrs={"""class""": """gs_ri"""} ) __SCREAMING_SNAKE_CASE = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" ) return anchors[2].get_text() if __name__ == "__main__": a = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2018, "hl": "en", } print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
'''simple docstring''' import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class __a ( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self : List[str] ,lowerCamelCase : float ,lowerCamelCase : Callable ,lowerCamelCase : int ,lowerCamelCase : float = 1.0 ,lowerCamelCase : str = None ,): '''simple docstring''' super().__init__() __SCREAMING_SNAKE_CASE = initial_learning_rate __SCREAMING_SNAKE_CASE = warmup_steps __SCREAMING_SNAKE_CASE = power __SCREAMING_SNAKE_CASE = decay_schedule_fn __SCREAMING_SNAKE_CASE = name def __call__( self : List[str] ,lowerCamelCase : List[str] ): '''simple docstring''' with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __SCREAMING_SNAKE_CASE = tf.cast(lowerCamelCase ,tf.floataa ) __SCREAMING_SNAKE_CASE = tf.cast(self.warmup_steps ,tf.floataa ) __SCREAMING_SNAKE_CASE = global_step_float / warmup_steps_float __SCREAMING_SNAKE_CASE = self.initial_learning_rate * tf.math.pow(lowerCamelCase ,self.power ) return tf.cond( global_step_float < warmup_steps_float ,lambda: warmup_learning_rate ,lambda: self.decay_schedule_fn(step - self.warmup_steps ) ,name=lowerCamelCase ,) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 0.9 , __UpperCAmelCase = 0.9_9_9 , __UpperCAmelCase = 1e-8 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0.0 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = None , ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__UpperCAmelCase , ) if num_warmup_steps: __SCREAMING_SNAKE_CASE = WarmUp( initial_learning_rate=__UpperCAmelCase , decay_schedule_fn=__UpperCAmelCase , warmup_steps=__UpperCAmelCase , ) if weight_decay_rate > 0.0: __SCREAMING_SNAKE_CASE = AdamWeightDecay( learning_rate=__UpperCAmelCase , weight_decay_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=__UpperCAmelCase , ) else: __SCREAMING_SNAKE_CASE = tf.keras.optimizers.Adam( learning_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class __a ( _snake_case ): def __init__( self : List[Any] ,lowerCamelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 ,lowerCamelCase : float = 0.9 ,lowerCamelCase : float = 0.999 ,lowerCamelCase : float = 1E-7 ,lowerCamelCase : bool = False ,lowerCamelCase : float = 0.0 ,lowerCamelCase : Optional[List[str]] = None ,lowerCamelCase : Optional[List[str]] = None ,lowerCamelCase : str = "AdamWeightDecay" ,**lowerCamelCase : str ,): '''simple docstring''' super().__init__(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = weight_decay_rate __SCREAMING_SNAKE_CASE = include_in_weight_decay __SCREAMING_SNAKE_CASE = exclude_from_weight_decay @classmethod def UpperCAmelCase__ ( cls : int ,lowerCamelCase : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {"""WarmUp""": WarmUp} return super(lowerCamelCase ,cls ).from_config(lowerCamelCase ,custom_objects=lowerCamelCase ) def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Dict ,lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ): '''simple docstring''' super(lowerCamelCase ,self )._prepare_local(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = tf.constant( self.weight_decay_rate ,name="""adam_weight_decay_rate""" ) def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Any ,lowerCamelCase : Tuple ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] ,use_locking=self._use_locking ,) return tf.no_op() def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : int ,lowerCamelCase : Dict=None ,**lowerCamelCase : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = list(zip(*lowerCamelCase ) ) return super(lowerCamelCase ,self ).apply_gradients(zip(lowerCamelCase ,lowerCamelCase ) ,name=lowerCamelCase ,**lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Any ): '''simple docstring''' if apply_state is None: return self._decayed_lr_t[var_dtype], {} __SCREAMING_SNAKE_CASE = apply_state or {} __SCREAMING_SNAKE_CASE = apply_state.get((var_device, var_dtype) ) if coefficients is None: __SCREAMING_SNAKE_CASE = self._fallback_apply_state(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Any ,lowerCamelCase : Optional[Any] ,lowerCamelCase : List[str]=None ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_lr(var.device ,var.dtype.base_dtype ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self._decay_weights_op(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) with tf.control_dependencies([decay] ): return super(lowerCamelCase ,self )._resource_apply_dense(lowerCamelCase ,lowerCamelCase ,**lowerCamelCase ) def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Dict ,lowerCamelCase : int ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=None ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self._get_lr(var.device ,var.dtype.base_dtype ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self._decay_weights_op(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) with 
tf.control_dependencies([decay] ): return super(lowerCamelCase ,self )._resource_apply_sparse(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,**lowerCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = super().get_config() config.update({"""weight_decay_rate""": self.weight_decay_rate} ) return config def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Optional[int] ): '''simple docstring''' if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowerCamelCase ,lowerCamelCase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowerCamelCase ,lowerCamelCase ) is not None: return False return True class __a ( _snake_case ): def __init__( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = None @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' if self._accum_steps is None: __SCREAMING_SNAKE_CASE = tf.Variable( tf.constant(0 ,dtype=tf.intaa ) ,trainable=lowerCamelCase ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,) return self._accum_steps.value() @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self : str ,lowerCamelCase : Any ): '''simple docstring''' if not self._gradients: __SCREAMING_SNAKE_CASE = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowerCamelCase ) ,trainable=lowerCamelCase ,synchronization=tf.VariableSynchronization.ON_READ ,aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA ,) if gradient is not None else gradient for gradient in gradients ] ) if len(lowerCamelCase ) != len(self._gradients ): raise ValueError(f"""Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase )}""" ) for accum_gradient, gradient in zip(self._gradients ,lowerCamelCase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowerCamelCase ) self._accum_steps.assign_add(1 ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowerCamelCase ) )
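# The WarmUp schedule above scales the learning rate by (step / warmup)**power
# during warmup; a minimal numeric check with illustrative values.
import tensorflow as tf

init_lr, warmup_steps, power = 1e-3, 10.0, 1.0
for step in (1.0, 5.0, 10.0):
    lr = init_lr * tf.math.pow(step / warmup_steps, power)
    print(float(lr))  # 0.0001, 0.0005, 0.001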
13
'''simple docstring''' import requests from bsa import BeautifulSoup def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" __SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" ) __SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)""" return soup.find("""div""" , class_=class_ ).find("""span""" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
13
1
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class __a : def __init__( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ,lowerCamelCase : str ,lowerCamelCase : float ): '''simple docstring''' if nodea not in self.connections: self.add_node(lowerCamelCase ) if nodea not in self.connections: self.add_node(lowerCamelCase ) __SCREAMING_SNAKE_CASE = probability def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return list(self.connections ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict[str, int]: '''simple docstring''' __SCREAMING_SNAKE_CASE = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = Counter(graph.get_nodes() ) __SCREAMING_SNAKE_CASE = start for _ in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = graph.transition(__UpperCAmelCase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
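# A self-contained walk over a two-state chain mirroring the cumulative-
# probability transition logic above; the probabilities are illustrative.
import random
from collections import Counter

transitions = {"a": {"a": 0.5, "b": 0.5}, "b": {"a": 0.2, "b": 0.8}}
node, visits = "a", Counter()
random.seed(0)
for _ in range(10000):
    r, cumulative = random.random(), 0.0
    for dest, probability in transitions[node].items():
        cumulative += probability
        if cumulative > r:
            node = dest
            break
    visits[node] += 1
print(visits)  # roughly 2/7 of visits land on 'a' (the stationary distribution)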
13
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase ) -> bool: '''simple docstring''' if num < 0: return False __SCREAMING_SNAKE_CASE = num __SCREAMING_SNAKE_CASE = 0 while num > 0: __SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
13
1
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def __magic_name__ ( __UpperCAmelCase = 1000000 , __UpperCAmelCase = 10 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = defaultdict(__UpperCAmelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: __SCREAMING_SNAKE_CASE = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: __SCREAMING_SNAKE_CASE = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCAmelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F'''{solution() = }''')
13
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a = list[list[float | int]] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix: '''simple docstring''' __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(__UpperCAmelCase ): for col in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , __UpperCAmelCase ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , __UpperCAmelCase ): for row in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(__UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase ) ] def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]: '''simple docstring''' __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(__UpperCAmelCase ): for col in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase ) def interpolated_func(__UpperCAmelCase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__UpperCAmelCase ) ) return interpolated_func def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ): x_val += 1 ret += poly(__UpperCAmelCase ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
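# A cross-check for the Gaussian-elimination solver above on a small system,
# using numpy as an independent reference: 2x + y = 5, x + 3y = 10.
import numpy as np

coefficients = np.array([[2.0, 1.0], [1.0, 3.0]])
rhs = np.array([5.0, 10.0])
print(np.linalg.solve(coefficients, rhs))  # [1. 3.]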
13
1
'''simple docstring''' import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : int __UpperCamelCase : float = 0.0 __UpperCamelCase : int = 1 __UpperCamelCase : int = 1 __UpperCamelCase : bool = True __UpperCamelCase : bool = False __UpperCamelCase : bool = False __UpperCamelCase : bool = False __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for i in range(self.num_layers ): __SCREAMING_SNAKE_CASE = self.in_channels if i == 0 else self.out_channels __SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=lowerCamelCase ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = resnets __SCREAMING_SNAKE_CASE = attentions if self.add_downsample: __SCREAMING_SNAKE_CASE = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self : Any ,lowerCamelCase : Dict ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple=True ): '''simple docstring''' __SCREAMING_SNAKE_CASE = () for resnet, attn in zip(self.resnets ,self.attentions ): __SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase ) __SCREAMING_SNAKE_CASE = attn(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase ) output_states += (hidden_states,) if self.add_downsample: __SCREAMING_SNAKE_CASE = self.downsamplers_a(lowerCamelCase ) output_states += (hidden_states,) return hidden_states, output_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : int __UpperCamelCase : float = 0.0 __UpperCamelCase : int = 1 __UpperCamelCase : bool = True __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for i in range(self.num_layers ): __SCREAMING_SNAKE_CASE = self.in_channels if i == 0 else self.out_channels __SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=lowerCamelCase ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = resnets if self.add_downsample: __SCREAMING_SNAKE_CASE = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self : str ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Dict=True ): '''simple docstring''' __SCREAMING_SNAKE_CASE = () for resnet in self.resnets: __SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase ) output_states += (hidden_states,) if self.add_downsample: __SCREAMING_SNAKE_CASE = self.downsamplers_a(lowerCamelCase ) output_states += (hidden_states,) return hidden_states, output_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : int __UpperCamelCase : int __UpperCamelCase : float = 0.0 __UpperCamelCase : int = 1 __UpperCamelCase : int = 1 
__UpperCamelCase : bool = True __UpperCamelCase : bool = False __UpperCamelCase : bool = False __UpperCamelCase : bool = False __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for i in range(self.num_layers ): __SCREAMING_SNAKE_CASE = self.in_channels if (i == self.num_layers - 1) else self.out_channels __SCREAMING_SNAKE_CASE = self.prev_output_channel if i == 0 else self.out_channels __SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = resnets __SCREAMING_SNAKE_CASE = attentions if self.add_upsample: __SCREAMING_SNAKE_CASE = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self : int ,lowerCamelCase : Any ,lowerCamelCase : List[Any] ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Union[str, Any]=True ): '''simple docstring''' for resnet, attn in zip(self.resnets ,self.attentions ): # pop res hidden states __SCREAMING_SNAKE_CASE = res_hidden_states_tuple[-1] __SCREAMING_SNAKE_CASE = res_hidden_states_tuple[:-1] __SCREAMING_SNAKE_CASE = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) __SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase ) __SCREAMING_SNAKE_CASE = attn(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase ) if self.add_upsample: __SCREAMING_SNAKE_CASE = self.upsamplers_a(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : int __UpperCamelCase : int __UpperCamelCase : float = 0.0 __UpperCamelCase : int = 1 __UpperCamelCase : bool = True __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for i in range(self.num_layers ): __SCREAMING_SNAKE_CASE = self.in_channels if (i == self.num_layers - 1) else self.out_channels __SCREAMING_SNAKE_CASE = self.prev_output_channel if i == 0 else self.out_channels __SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = resnets if self.add_upsample: __SCREAMING_SNAKE_CASE = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self : Dict ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple=True ): '''simple docstring''' for resnet in self.resnets: # pop res hidden states __SCREAMING_SNAKE_CASE = res_hidden_states_tuple[-1] __SCREAMING_SNAKE_CASE = res_hidden_states_tuple[:-1] __SCREAMING_SNAKE_CASE = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) __SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase ) if self.add_upsample: __SCREAMING_SNAKE_CASE = self.upsamplers_a(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase 
: int __UpperCamelCase : float = 0.0 __UpperCamelCase : int = 1 __UpperCamelCase : int = 1 __UpperCamelCase : bool = False __UpperCamelCase : bool = False __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [ FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) ] __SCREAMING_SNAKE_CASE = [] for _ in range(self.num_layers ): __SCREAMING_SNAKE_CASE = FlaxTransformeraDModel( in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = resnets __SCREAMING_SNAKE_CASE = attentions def __call__( self : List[str] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ,lowerCamelCase : Tuple ,lowerCamelCase : Any=True ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.resnets[0](lowerCamelCase ,lowerCamelCase ) for attn, resnet in zip(self.attentions ,self.resnets[1:] ): __SCREAMING_SNAKE_CASE = attn(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase ) __SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase ) return hidden_states
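# The up-blocks above concatenate each skip connection along the channel axis
# (NHWC, axis=-1) before the resnet; a minimal shape check.
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 16, 16, 64))
res_hidden_states = jnp.zeros((1, 16, 16, 32))
print(jnp.concatenate((hidden_states, res_hidden_states), axis=-1).shape)  # (1, 16, 16, 96)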
13
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a = logging.get_logger(__name__) @add_end_docstrings(_snake_case ) class __a ( _snake_case ): def __init__( self : Union[str, Any] ,**lowerCamelCase : str ): '''simple docstring''' super().__init__(**lowerCamelCase ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ): '''simple docstring''' return super().__call__(lowerCamelCase ,**lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_image(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(lowerCamelCase ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" ) __SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" ) __SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda lowerCamelCase : -lowerCamelCase[0] ) ] return result
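# The usual way to reach this pipeline through the public API; the checkpoint
# is a real CLIP model, but "cat.png" is a hypothetical local file.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
outputs = classifier("cat.png", candidate_labels=["cat", "dog"])
print(outputs)  # [{'score': ..., 'label': ...}, ...] sorted by descending score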
13
1
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser a = logging.getLogger(__name__) torch.set_grad_enabled(False) a = "cuda" if torch.cuda.is_available() else "cpu" def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=" " ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE = text.split(__UpperCAmelCase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )] def __magic_name__ ( __UpperCAmelCase ) -> dict: '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(__UpperCAmelCase ): titles.append(title if title is not None else """""" ) texts.append(__UpperCAmelCase ) return {"title": titles, "text": texts} def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=__UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] __SCREAMING_SNAKE_CASE = ctx_encoder(input_ids.to(device=__UpperCAmelCase ) , return_dict=__UpperCAmelCase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: '''simple docstring''' logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way __SCREAMING_SNAKE_CASE = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words __SCREAMING_SNAKE_CASE = dataset.map(__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=processing_args.num_proc ) # And compute the embeddings __SCREAMING_SNAKE_CASE = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) __SCREAMING_SNAKE_CASE = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space __SCREAMING_SNAKE_CASE = dataset.map( partial(__UpperCAmelCase , ctx_encoder=__UpperCAmelCase , ctx_tokenizer=__UpperCAmelCase ) , batched=__UpperCAmelCase , batch_size=processing_args.batch_size , features=__UpperCAmelCase , ) # And 
finally save your dataset __SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(__UpperCAmelCase ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search __SCREAMING_SNAKE_CASE = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=__UpperCAmelCase ) # And save the index __SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(__UpperCAmelCase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __a : __UpperCamelCase : str = field( default=str(Path(_snake_case ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ), metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''}, ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'}, ) __UpperCamelCase : str = field( default='facebook/rag-sequence-nq', metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''}, ) __UpperCamelCase : str = field( default='facebook/dpr-ctx_encoder-multiset-base', metadata={ 'help': ( 'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or' ' \'facebook/dpr-ctx_encoder-multiset-base\'' ) }, ) __UpperCamelCase : Optional[str] = field( default=str(Path(_snake_case ).parent / 'test_run' / 'dummy-kb' ), metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'}, ) @dataclass class __a : __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': 'The number of processes to use to split the documents into passages. Default is single process.' }, ) __UpperCamelCase : int = field( default=16, metadata={ 'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.' }, ) @dataclass class __a : __UpperCamelCase : int = field( default=768, metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'}, ) __UpperCamelCase : int = field( default=128, metadata={ 'help': ( 'The number of bi-directional links created for every new element during the HNSW index construction.' ) }, ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) a = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) a , a , a = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: a = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
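For orientation, a minimal hedged sketch of how the artifacts saved above could be reloaded for retrieval; the output directory and the DPR question-encoder checkpoint are illustrative assumptions, not taken from the script.

# Hedged sketch: reload the saved passages dataset and its FAISS index (paths assumed
# to match the script's default test_run/dummy-kb output directory).
import os
import torch
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

output_dir = "test_run/dummy-kb"  # assumption
dataset = load_from_disk(os.path.join(output_dir, "my_knowledge_dataset"))
dataset.load_faiss_index("embeddings", os.path.join(output_dir, "my_knowledge_dataset_hnsw_index.faiss"))

# Querying needs the matching DPR *question* encoder (assumed checkpoint; the script
# above only uses the context encoder).
q_tok = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
with torch.no_grad():
    emb = q_enc(**q_tok("What does Moses' rod turn into ?", return_tensors="pt")).pooler_output
scores, docs = dataset.get_nearest_examples("embeddings", emb[0].numpy(), k=5)
print(docs["title"])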
13
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers pkgs_to_check_at_runtime = [ "python", "tqdm", "regex", "requests", "packaging", "filelock", "numpy", "tokenizers", "huggingface-hub", "safetensors", "accelerate", "pyyaml", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def dep_version_check ( pkg , hint=None ): '''simple docstring''' require_version(deps[pkg] , hint )
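A brief hedged usage note for the version-check helper defined at the end of this file:

# Usage sketch (assumed call sites; both raise if the installed version violates the pin):
# dep_version_check("tqdm")                                          # pin from the deps table
# require_version("numpy>=1.17", "To fix: pip install numpy>=1.17")  # ad-hoc requirement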
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = { "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"], "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_transfo_xl"] = [ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "AdaptiveEmbedding", "TransfoXLForSequenceClassification", "TransfoXLLMHeadModel", "TransfoXLModel", "TransfoXLPreTrainedModel", "load_tf_weights_in_transfo_xl", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_transfo_xl"] = [ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAdaptiveEmbedding", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", "TFTransfoXLMainLayer", "TFTransfoXLModel", "TFTransfoXLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
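To make the deferral mechanism concrete, here is a simplified sketch of what a lazy module like `_LazyModule` does; this is an assumption-level illustration, not the real transformers implementation.

# Simplified sketch of lazy attribute-driven imports: attribute access triggers the
# submodule import via __getattr__, then caches the result on the module object.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value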
13
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") a = logging.getLogger(__name__) @dataclass class __a : __UpperCamelCase : Optional[str] = field( default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) __UpperCamelCase : Optional[str] = field( default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, ) __UpperCamelCase : int = field( default=1024, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} ) __UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __a : __UpperCamelCase : str = field( default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, ) __UpperCamelCase : str = field( default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) }, ) def __magic_name__ ( ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __SCREAMING_SNAKE_CASE = training_args.get_process_log_level() logger.setLevel(__UpperCAmelCase ) datasets.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1] __SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __SCREAMING_SNAKE_CASE = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __SCREAMING_SNAKE_CASE = False # Some models have set the order of the labels to use, so let's make sure we do use it. __SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1} __SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__UpperCAmelCase ): # Tokenize the texts def _convert_table_text_to_pandas(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __SCREAMING_SNAKE_CASE = examples["""statement"""] __SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __SCREAMING_SNAKE_CASE = raw_datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""train"""] if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and 
"test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions __SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = default_data_collator elif training_args.fpaa: __SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 ) else: __SCREAMING_SNAKE_CASE = None # Initialize our Trainer __SCREAMING_SNAKE_CASE = Trainer( model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE = last_checkpoint __SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = train_result.metrics __SCREAMING_SNAKE_CASE = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __UpperCAmelCase ) trainer.save_metrics("""train""" , __UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.log_metrics("""eval""" , __UpperCAmelCase ) trainer.save_metrics("""eval""" , __UpperCAmelCase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" ) __SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions __SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 ) __SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(__UpperCAmelCase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = label_list[item] writer.write(f"""{index}\t{item}\n""" ) __SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**__UpperCAmelCase ) else: trainer.create_model_card(**__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> Any: '''simple docstring''' main() if __name__ == "__main__": main()
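As a standalone illustration of the accuracy computation inside the `compute_metrics` function above (the arrays are made-up sample values):

# Mirrors the metric logic: argmax over logits, then mean agreement with the labels.
import numpy as np

logits = np.array([[0.2, 0.8], [0.9, 0.1], [0.4, 0.6]])
label_ids = np.array([1, 0, 0])
preds = np.argmax(logits, axis=1)
print((preds == label_ids).astype(np.float64).mean().item())  # 0.666...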
13
1
'''simple docstring''' def is_palindrome ( num ) -> bool: '''simple docstring''' return str(num ) == str(num )[::-1] def sum_reverse ( num ) -> int: '''simple docstring''' return int(num ) + int(str(num )[::-1] ) def solution ( limit = 10000 ) -> int: '''simple docstring''' lychrel_nums = [] for num in range(1 , limit ): iterations = 0 current = num while iterations < 50: current = sum_reverse(current ) iterations += 1 if is_palindrome(current ): break else: lychrel_nums.append(num ) return len(lychrel_nums ) if __name__ == "__main__": print(F'''{solution() = }''')
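For intuition, a short trace of the reverse-and-add process on a non-Lychrel candidate (47 + 74 = 121, a palindrome after one step):

# Worked example of the reverse-and-add iteration used above.
n = 47
for step in range(1, 51):
    n = n + int(str(n)[::-1])
    if str(n) == str(n)[::-1]:
        print(f"palindrome {n} reached after {step} step(s)")  # palindrome 121 after 1 step(s)
        break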
13
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
13
1
'''simple docstring''' import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer a = "bart" a = True @st.cache(allow_output_mutation=__UpperCAmelCase ) def __magic_name__ ( ) -> List[str]: '''simple docstring''' if LOAD_DENSE_INDEX: __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" ) __SCREAMING_SNAKE_CASE = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" ) __SCREAMING_SNAKE_CASE = qar_model.eval() else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (None, None) if MODEL_TYPE == "bart": __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" ) __SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" ) __SCREAMING_SNAKE_CASE = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" ) sas_model.load_state_dict(save_dict["""model"""] ) __SCREAMING_SNAKE_CASE = sas_model.eval() else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = make_qa_sas_model( model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__UpperCAmelCase ) def __magic_name__ ( ) -> List[Any]: '''simple docstring''' if LOAD_DENSE_INDEX: __SCREAMING_SNAKE_CASE = faiss.StandardGpuResources() __SCREAMING_SNAKE_CASE = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""] __SCREAMING_SNAKE_CASE = np.memmap( """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , ) __SCREAMING_SNAKE_CASE = faiss.IndexFlatIP(128 ) __SCREAMING_SNAKE_CASE = faiss.index_cpu_to_gpu(__UpperCAmelCase , 1 , __UpperCAmelCase ) wikiaab_gpu_index_flat.add(__UpperCAmelCase ) # TODO fix for larger GPU else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (None, None) __SCREAMING_SNAKE_CASE = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__UpperCAmelCase ) def __magic_name__ ( ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" ) __SCREAMING_SNAKE_CASE = elia["""train_eli5"""] __SCREAMING_SNAKE_CASE = np.memmap( """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) ) __SCREAMING_SNAKE_CASE = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__UpperCAmelCase ) return (elia_train, eli5_train_q_index) a , a , a = load_indexes() a , a , a , a = load_models() a , a = load_train_data() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=10 ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = embed_questions_for_retrieval([question] , __UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = eli5_train_q_index.search(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [elia_train[int(__UpperCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase="wiki40b" , __UpperCAmelCase="dense" , __UpperCAmelCase=10 ) -> Optional[int]: '''simple 
docstring''' if source == "none": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), []) else: if method == "dense": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = query_qa_dense_index( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = query_es_index( __UpperCAmelCase , __UpperCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = [ (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst ] __SCREAMING_SNAKE_CASE = """question: {} context: {}""".format(__UpperCAmelCase , __UpperCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __UpperCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCAmelCase : None), } ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=64 , __UpperCAmelCase=256 , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=0.9_5 , __UpperCAmelCase=0.8 ) -> Any: '''simple docstring''' with torch.no_grad(): __SCREAMING_SNAKE_CASE = qa_sas_generate( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_answers=1 , num_beams=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase , do_sample=__UpperCAmelCase , temp=__UpperCAmelCase , top_p=__UpperCAmelCase , top_k=__UpperCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar a = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" a = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia a = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) a = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] a = st.sidebar.checkbox("Demo options") if demo_options: a = st.sidebar.selectbox( "", action_list, index=3, ) a = action_list.index(action_st) a = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) a = show_type == "Show full text of passages" else: a = 3 a = True a = st.sidebar.checkbox("Retrieval options") if retrieval_options: a = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n 
" st.sidebar.markdown(retriever_info) a = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) a = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: a = "wiki40b" a = "dense" a = "beam" a = 2 a = 64 a = 256 a = None a = None a = st.sidebar.checkbox("Generation options") if generate_options: a = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) a = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) a = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) a = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": a = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: a = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) a = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) a = None # start main text a = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] a = st.selectbox( "What would you like to ask? 
---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": a = st.text_input("Enter your question here:", "") else: a = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": a , a = make_support(question, source=wiki_source, method="dense", n_results=10) a , a = make_support(question, source=wiki_source, method="sparse", n_results=10) a = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] a = support_list[:10] a = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: a , a = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: a , a = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): a = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) a = res[1].strip() if sec_titles == "": a = "[{}]({})".format(res[0], wiki_url) else: a = sec_titles.split(" & ") a = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: a = find_nearest_training(question) a = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) a = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) a = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
13
'''simple docstring''' import requests from bs4 import BeautifulSoup def get_citation ( base_url , params ) -> str: '''simple docstring''' soup = BeautifulSoup(requests.get(base_url , params=params ).content , """html.parser""" ) div = soup.find("""div""" , attrs={"""class""": """gs_ri"""} ) anchors = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" ) return anchors[2].get_text() if __name__ == "__main__": params = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2018, "hl": "en", } print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
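Because `anchors[2]` hard-codes the position of the "Cited by N" link, a slightly defensive variant may be preferable; the selector details are assumptions about Google Scholar's current markup, which can change without notice.

# Defensive sketch: look for the "Cited by" anchor by text instead of by position.
import requests
from bs4 import BeautifulSoup


def get_citation_safe(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    footer = soup.find("div", attrs={"class": "gs_fl"})
    if footer is None:
        return "citation link not found"
    anchor = next((a for a in footer.find_all("a") if a.get_text().startswith("Cited by")), None)
    return anchor.get_text() if anchor else "citation link not found"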
13
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _import_structure = { "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_nezha"] = [ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", "NezhaForMultipleChoice", "NezhaForQuestionAnswering", "NezhaForSequenceClassification", "NezhaForTokenClassification", "NezhaModel", "NezhaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class __a ( _snake_case ): __UpperCamelCase : Tuple = 'camembert' def __init__( self : int ,lowerCamelCase : List[Any]=3_0522 ,lowerCamelCase : List[Any]=768 ,lowerCamelCase : str=12 ,lowerCamelCase : List[str]=12 ,lowerCamelCase : Optional[Any]=3072 ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=512 ,lowerCamelCase : Dict=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : List[Any]=1E-1_2 ,lowerCamelCase : Union[str, Any]=1 ,lowerCamelCase : Optional[Any]=0 ,lowerCamelCase : List[Any]=2 ,lowerCamelCase : List[str]="absolute" ,lowerCamelCase : int=True ,lowerCamelCase : Any=None ,**lowerCamelCase : Optional[Any] ,): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout class __a ( _snake_case ): @property def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
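A hedged usage sketch: under the dataset's obfuscation the class `__a` above corresponds to transformers' `CamembertConfig`, which can be exercised like this.

# Instantiate the config with a couple of explicit values and check the registered type.
from transformers import CamembertConfig

config = CamembertConfig(hidden_size=768, num_attention_heads=12, num_hidden_layers=12)
print(config.model_type)   # "camembert"
print(config.hidden_size)  # 768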
13
1
'''simple docstring''' from math import log2 def get_index_of_rightmost_set_bit ( number ) -> int: '''simple docstring''' if not isinstance(number , int ): raise TypeError("""Input value must be a 'int' type""" ) if number < 0: raise ValueError("""Input value must be a positive integer""" ) return 0 if (number == 0) else int(log2(number & -number ) ) if __name__ == "__main__": import doctest doctest.testmod()
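The key identity is that `n & -n` isolates the lowest set bit in two's complement, so `log2` of that power of two is its index; a quick check:

# Worked check of the rightmost-set-bit index (36 = 0b100100, lowest set bit is 4 = 2**2).
from math import log2

for value, expected in [(36, 2), (8, 3), (1, 0)]:
    assert int(log2(value & -value)) == expected
print("rightmost set bit indices verified")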
13
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __a ( unittest.TestCase ): def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 __SCREAMING_SNAKE_CASE = num_patches + 1 def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = ViTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,) return config, pixel_values def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) __SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size) __SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.type_sequence_label_size __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 
self.type_sequence_label_size) ) # test greyscale images __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase ) __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxViTModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(lowerCamelCase ) __SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = model_class(lowerCamelCase ) @jax.jit def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ): return model(pixel_values=lowerCamelCase ,**lowerCamelCase ) with self.subTest("""JIT Enabled""" ): __SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ): self.assertEqual(jitted_output.shape ,output.shape ) @slow def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase )
13
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a = logging.get_logger(__name__) a = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : Dict = 'deta' __UpperCamelCase : List[str] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,): '''simple docstring''' if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) __SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" ) __SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] __SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase ) __SCREAMING_SNAKE_CASE = backbone_config __SCREAMING_SNAKE_CASE = num_queries __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = init_xavier_std __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = auxiliary_loss __SCREAMING_SNAKE_CASE = position_embedding_type # deformable attributes __SCREAMING_SNAKE_CASE = num_feature_levels __SCREAMING_SNAKE_CASE = encoder_n_points __SCREAMING_SNAKE_CASE = decoder_n_points __SCREAMING_SNAKE_CASE = two_stage __SCREAMING_SNAKE_CASE = two_stage_num_proposals __SCREAMING_SNAKE_CASE = with_box_refine __SCREAMING_SNAKE_CASE = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __SCREAMING_SNAKE_CASE = class_cost __SCREAMING_SNAKE_CASE = bbox_cost __SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients __SCREAMING_SNAKE_CASE = mask_loss_coefficient 
__SCREAMING_SNAKE_CASE = dice_loss_coefficient __SCREAMING_SNAKE_CASE = bbox_loss_coefficient __SCREAMING_SNAKE_CASE = giou_loss_coefficient __SCREAMING_SNAKE_CASE = eos_coefficient __SCREAMING_SNAKE_CASE = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase ) @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return self.encoder_attention_heads @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self.d_model def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() __SCREAMING_SNAKE_CASE = self.__class__.model_type return output
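A hedged round-trip sketch for the config above (obfuscated here as `__a`; in transformers it is `DetaConfig`, whose availability depends on the installed version):

# Build a config and serialize it; to_dict() also expands the nested backbone config.
from transformers import DetaConfig

config = DetaConfig(two_stage=True, with_box_refine=True)
d = config.to_dict()
print(d["model_type"], d["backbone_config"]["model_type"])  # "deta" "resnet"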
13
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class __a ( _snake_case ): __UpperCamelCase : Tuple = 'sew' def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,): '''simple docstring''' super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ) __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = feat_extract_norm __SCREAMING_SNAKE_CASE = feat_extract_activation __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = conv_bias __SCREAMING_SNAKE_CASE = num_conv_pos_embeddings __SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups __SCREAMING_SNAKE_CASE = len(self.conv_dim ) __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = squeeze_factor __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = feat_proj_dropout __SCREAMING_SNAKE_CASE = final_dropout __SCREAMING_SNAKE_CASE = layerdrop __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 
__SCREAMING_SNAKE_CASE = apply_spec_augment __SCREAMING_SNAKE_CASE = mask_time_prob __SCREAMING_SNAKE_CASE = mask_time_length __SCREAMING_SNAKE_CASE = mask_time_min_masks __SCREAMING_SNAKE_CASE = mask_feature_prob __SCREAMING_SNAKE_CASE = mask_feature_length __SCREAMING_SNAKE_CASE = mask_feature_min_masks # ctc loss __SCREAMING_SNAKE_CASE = ctc_loss_reduction __SCREAMING_SNAKE_CASE = ctc_zero_infinity # sequence classification __SCREAMING_SNAKE_CASE = use_weighted_layer_sum __SCREAMING_SNAKE_CASE = classifier_proj_size @property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return functools.reduce(operator.mul ,self.conv_stride ,1 )
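# A minimal sketch of the stride-product property defined above, assuming this
# config is exposed as transformers' SEWConfig (which it mirrors); the default
# values correspond to asapp/sew-tiny-100k.
from transformers import SEWConfig

config = SEWConfig()
# functools.reduce(operator.mul, conv_stride, 1) over (5, 2, 1, 2, ...) -> 5 * 2**6
print(config.inputs_to_logits_ratio)  # 320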
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Project Euler problem 26: find the value of d < 1000 for which 1/d contains
    the longest recurring cycle in its decimal fraction part. The cycle is
    detected by simulating long division and stopping once a remainder repeats.
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
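# Sanity check: the well-known answer for d < 1000 is 983 (1/983 repeats with a
# 982-digit period); this takes a few seconds with the O(d^2) list scan above.
if __name__ == "__main__":
    assert solution(1, 1000) == 983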
'''simple docstring''' import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated a = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ a = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __UpperCAmelCase ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__UpperCAmelCase )[0] @deprecated(__UpperCAmelCase , """Please use tf.data to implement this functionality.""" ) def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' print("""Extracting""" , f.name ) with gzip.GzipFile(fileobj=__UpperCAmelCase ) as bytestream: __SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase ) if magic != 2051: raise ValueError( """Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) ) __SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = bytestream.read(rows * cols * num_images ) __SCREAMING_SNAKE_CASE = numpy.frombuffer(__UpperCAmelCase , dtype=numpy.uinta ) __SCREAMING_SNAKE_CASE = data.reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , 1 ) return data @deprecated(__UpperCAmelCase , """Please use tf.one_hot on tensors.""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __SCREAMING_SNAKE_CASE = labels_dense.shape[0] __SCREAMING_SNAKE_CASE = numpy.arange(__UpperCAmelCase ) * num_classes __SCREAMING_SNAKE_CASE = numpy.zeros((num_labels, num_classes) ) __SCREAMING_SNAKE_CASE = 1 return labels_one_hot @deprecated(__UpperCAmelCase , """Please use tf.data to implement this functionality.""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=10 ) -> Optional[Any]: '''simple docstring''' print("""Extracting""" , f.name ) with gzip.GzipFile(fileobj=__UpperCAmelCase ) as bytestream: __SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase ) if magic != 2049: raise ValueError( """Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) ) __SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = bytestream.read(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = numpy.frombuffer(__UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__UpperCAmelCase , __UpperCAmelCase ) return labels class __a : @deprecated( lowerCamelCase ,"""Please use alternatives such as official/mnist/_DataSet.py""" """ from tensorflow/models.""" ,) def __init__( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : int ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : List[str]=dtypes.floataa ,lowerCamelCase : Tuple=True ,lowerCamelCase : List[Any]=None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = random_seed.get_seed(lowerCamelCase ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __SCREAMING_SNAKE_CASE = dtypes.as_dtype(lowerCamelCase ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype ) if fake_data: __SCREAMING_SNAKE_CASE = 1_0000 __SCREAMING_SNAKE_CASE = one_hot 
else: assert ( images.shape[0] == labels.shape[0] ), f"""images.shape: {images.shape} labels.shape: {labels.shape}""" __SCREAMING_SNAKE_CASE = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __SCREAMING_SNAKE_CASE = images.reshape( images.shape[0] ,images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __SCREAMING_SNAKE_CASE = images.astype(numpy.floataa ) __SCREAMING_SNAKE_CASE = numpy.multiply(lowerCamelCase ,1.0 / 255.0 ) __SCREAMING_SNAKE_CASE = images __SCREAMING_SNAKE_CASE = labels __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 @property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return self._images @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self._labels @property def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' return self._num_examples @property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return self._epochs_completed def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : Any=False ,lowerCamelCase : Dict=True ): '''simple docstring''' if fake_data: __SCREAMING_SNAKE_CASE = [1] * 784 __SCREAMING_SNAKE_CASE = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(lowerCamelCase )], [fake_label for _ in range(lowerCamelCase )], ) __SCREAMING_SNAKE_CASE = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __SCREAMING_SNAKE_CASE = numpy.arange(self._num_examples ) numpy.random.shuffle(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.images[perma] __SCREAMING_SNAKE_CASE = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __SCREAMING_SNAKE_CASE = self._num_examples - start __SCREAMING_SNAKE_CASE = self._images[start : self._num_examples] __SCREAMING_SNAKE_CASE = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __SCREAMING_SNAKE_CASE = numpy.arange(self._num_examples ) numpy.random.shuffle(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.images[perm] __SCREAMING_SNAKE_CASE = self.labels[perm] # Start next epoch __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = batch_size - rest_num_examples __SCREAMING_SNAKE_CASE = self._index_in_epoch __SCREAMING_SNAKE_CASE = self._images[start:end] __SCREAMING_SNAKE_CASE = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) ,axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) ,axis=0 ), ) else: self._index_in_epoch += batch_size __SCREAMING_SNAKE_CASE = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__UpperCAmelCase , """Please write your own downloading logic.""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' if not gfile.Exists(__UpperCAmelCase ): gfile.MakeDirs(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , __UpperCAmelCase ) if not gfile.Exists(__UpperCAmelCase ): urllib.request.urlretrieve(__UpperCAmelCase , __UpperCAmelCase ) # noqa: S310 with gfile.GFile(__UpperCAmelCase ) as f: __SCREAMING_SNAKE_CASE = f.size() print("""Successfully downloaded""" , __UpperCAmelCase , __UpperCAmelCase , """bytes.""" ) return filepath @deprecated( 
__UpperCAmelCase , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=dtypes.floataa , __UpperCAmelCase=True , __UpperCAmelCase=5000 , __UpperCAmelCase=None , __UpperCAmelCase=DEFAULT_SOURCE_URL , ) -> int: '''simple docstring''' if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__UpperCAmelCase , one_hot=__UpperCAmelCase , dtype=__UpperCAmelCase , seed=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = fake() __SCREAMING_SNAKE_CASE = fake() __SCREAMING_SNAKE_CASE = fake() return _Datasets(train=__UpperCAmelCase , validation=__UpperCAmelCase , test=__UpperCAmelCase ) if not source_url: # empty string check __SCREAMING_SNAKE_CASE = DEFAULT_SOURCE_URL __SCREAMING_SNAKE_CASE = """train-images-idx3-ubyte.gz""" __SCREAMING_SNAKE_CASE = """train-labels-idx1-ubyte.gz""" __SCREAMING_SNAKE_CASE = """t10k-images-idx3-ubyte.gz""" __SCREAMING_SNAKE_CASE = """t10k-labels-idx1-ubyte.gz""" __SCREAMING_SNAKE_CASE = _maybe_download( __UpperCAmelCase , __UpperCAmelCase , source_url + train_images_file ) with gfile.Open(__UpperCAmelCase , """rb""" ) as f: __SCREAMING_SNAKE_CASE = _extract_images(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = _maybe_download( __UpperCAmelCase , __UpperCAmelCase , source_url + train_labels_file ) with gfile.Open(__UpperCAmelCase , """rb""" ) as f: __SCREAMING_SNAKE_CASE = _extract_labels(__UpperCAmelCase , one_hot=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = _maybe_download( __UpperCAmelCase , __UpperCAmelCase , source_url + test_images_file ) with gfile.Open(__UpperCAmelCase , """rb""" ) as f: __SCREAMING_SNAKE_CASE = _extract_images(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = _maybe_download( __UpperCAmelCase , __UpperCAmelCase , source_url + test_labels_file ) with gfile.Open(__UpperCAmelCase , """rb""" ) as f: __SCREAMING_SNAKE_CASE = _extract_labels(__UpperCAmelCase , one_hot=__UpperCAmelCase ) if not 0 <= validation_size <= len(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = ( """Validation size should be between 0 and """ f"""{len(__UpperCAmelCase )}. Received: {validation_size}.""" ) raise ValueError(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = train_images[:validation_size] __SCREAMING_SNAKE_CASE = train_labels[:validation_size] __SCREAMING_SNAKE_CASE = train_images[validation_size:] __SCREAMING_SNAKE_CASE = train_labels[validation_size:] __SCREAMING_SNAKE_CASE = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed} __SCREAMING_SNAKE_CASE = _DataSet(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = _DataSet(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = _DataSet(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) return _Datasets(train=__UpperCAmelCase , validation=__UpperCAmelCase , test=__UpperCAmelCase )
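# A usage sketch only: `read_data_sets` is the assumed public name of the loader
# above, mirroring the deprecated tensorflow.examples.tutorials.mnist API; the
# directory is arbitrary and MNIST is downloaded on first use.
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
    batch_images, batch_labels = mnist.train.next_batch(128)  # (128, 784), (128, 10)
    print(mnist.train.num_examples, mnist.validation.num_examples)  # 55000 5000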
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __a ( unittest.TestCase ): def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224} __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_convert_rgb def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __SCREAMING_SNAKE_CASE = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) ) else: __SCREAMING_SNAKE_CASE = [] for i in range(self.batch_size ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 ) image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs] if torchify: __SCREAMING_SNAKE_CASE = [torch.from_numpy(lowerCamelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase ) @property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return 
self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' pass def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase ) __SCREAMING_SNAKE_CASE = 3 @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
'''simple docstring''' import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder a = "base_with_context" def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) ) __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): __SCREAMING_SNAKE_CASE = weights[f"""layers_{lyr_num}"""] __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) __SCREAMING_SNAKE_CASE = ly_weight["""attention"""] __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any: '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): __SCREAMING_SNAKE_CASE = weights[f"""layers_{lyr_num}"""] __SCREAMING_SNAKE_CASE = ly_weight["""attention"""] __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) __SCREAMING_SNAKE_CASE = 
nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) ) for lyr_num, lyr in enumerate(model.decoders ): __SCREAMING_SNAKE_CASE = weights[f"""layers_{lyr_num}"""] __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) ) __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = ly_weight["""self_attention"""] __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = ly_weight["""MultiHeadDotProductAttention_0"""] __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) __SCREAMING_SNAKE_CASE = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) ) return model def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = checkpoints.load_tax_checkpoint(args.checkpoint_path ) __SCREAMING_SNAKE_CASE = jnp.tree_util.tree_map(onp.array , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [ """from __gin__ import dynamic_registration""", """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""", """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""", """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""", ] __SCREAMING_SNAKE_CASE = os.path.join(args.checkpoint_path , 
"""..""" , """config.gin""" ) __SCREAMING_SNAKE_CASE = inference.parse_training_gin_file(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = inference.InferenceModel(args.checkpoint_path , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" ) __SCREAMING_SNAKE_CASE = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) __SCREAMING_SNAKE_CASE = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) __SCREAMING_SNAKE_CASE = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) __SCREAMING_SNAKE_CASE = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" ) __SCREAMING_SNAKE_CASE = SpectrogramDiffusionPipeline( notes_encoder=__UpperCAmelCase , continuous_encoder=__UpperCAmelCase , decoder=__UpperCAmelCase , scheduler=__UpperCAmelCase , melgan=__UpperCAmelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F'''{MODEL}/checkpoint_500000''', type=str, required=False, help="Path to the original jax model checkpoint.", ) a = parser.parse_args() main(args)
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator that makes the wrapped function return its runtime in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate `num_examples` dummy rows matching the given feature spec."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write dummy examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
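# A hypothetical benchmark driver for the helpers above; the feature spec and
# the /tmp path are illustrative, not part of the original module.
if __name__ == "__main__":
    bench_features = datasets.Features(
        {"text": datasets.Value("string"), "label": datasets.Value("int64")}
    )

    @get_duration
    def iterate_dataset(dataset):
        for _ in dataset:
            pass

    bench_dataset = generate_example_dataset("/tmp/bench.arrow", bench_features, num_examples=100)
    print(f"iterated 100 examples in {iterate_dataset(bench_dataset):.4f}s")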
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a = "__DUMMY_TRANSFORMERS_USER__" a = "Dummy User" a = "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" a = "https://hub-ci.huggingface.co" a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}" a = Path("~/.huggingface/hub_ci_token").expanduser() @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(__UpperCAmelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __magic_name__ ( ) -> Optional[Any]: '''simple docstring''' return HfApi(endpoint=__UpperCAmelCase ) @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = HfFolder.get_token() HfFolder.save_token(__UpperCAmelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Dict: '''simple docstring''' def _cleanup_repo(__UpperCAmelCase ): hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' @contextmanager def _temporary_repo(__UpperCAmelCase ): try: yield repo_id finally: cleanup_repo(__UpperCAmelCase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , 
private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ) -> Tuple: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) __SCREAMING_SNAKE_CASE = [] for i in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps __SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class __a ( _snake_case, _snake_case ): __UpperCamelCase : int = [e.name for e in KarrasDiffusionSchedulers] __UpperCamelCase : str = 2 @register_to_config def __init__( self : int ,lowerCamelCase : int = 1000 ,lowerCamelCase : float = 0.00_085 ,lowerCamelCase : float = 0.012 ,lowerCamelCase : str = "linear" ,lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None ,lowerCamelCase : str = "epsilon" ,lowerCamelCase : Optional[bool] = False ,lowerCamelCase : Optional[bool] = False ,lowerCamelCase : float = 1.0 ,lowerCamelCase : str = "linspace" ,lowerCamelCase : int = 0 ,): '''simple docstring''' if trained_betas is not None: __SCREAMING_SNAKE_CASE = torch.tensor(lowerCamelCase ,dtype=torch.floataa ) elif beta_schedule == "linear": __SCREAMING_SNAKE_CASE = torch.linspace(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __SCREAMING_SNAKE_CASE = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,lowerCamelCase ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase ,alpha_transform_type="""cosine""" ) elif beta_schedule == "exp": __SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase ,alpha_transform_type="""exp""" ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) __SCREAMING_SNAKE_CASE = 1.0 - self.betas __SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = use_karras_sigmas def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : int ,lowerCamelCase : Dict=None ): '''simple docstring''' if schedule_timesteps is None: __SCREAMING_SNAKE_CASE = self.timesteps __SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __SCREAMING_SNAKE_CASE = 1 if len(lowerCamelCase ) > 1 else 0 else: __SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep __SCREAMING_SNAKE_CASE = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : Union[float, torch.FloatTensor] ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.sigmas[step_index] __SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCAmelCase__ ( self : int ,lowerCamelCase : int ,lowerCamelCase : Union[str, torch.device] = None ,lowerCamelCase : Optional[int] = None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = num_inference_steps __SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __SCREAMING_SNAKE_CASE = np.linspace(0 ,num_train_timesteps - 1 ,lowerCamelCase ,dtype=lowerCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": __SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __SCREAMING_SNAKE_CASE = (np.arange(0 ,lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __SCREAMING_SNAKE_CASE = (np.arange(lowerCamelCase ,0 ,-step_ratio )).round().copy().astype(lowerCamelCase ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) __SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __SCREAMING_SNAKE_CASE = np.log(lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.interp(lowerCamelCase ,np.arange(0 ,len(lowerCamelCase ) ) ,lowerCamelCase ) if self.config.use_karras_sigmas: __SCREAMING_SNAKE_CASE = self._convert_to_karras(in_sigmas=lowerCamelCase ,num_inference_steps=self.num_inference_steps ) __SCREAMING_SNAKE_CASE = np.array([self._sigma_to_t(lowerCamelCase ,lowerCamelCase ) for sigma in sigmas] ) __SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(lowerCamelCase ).startswith("""mps""" ): # mps does not support float64 __SCREAMING_SNAKE_CASE = timesteps.to(lowerCamelCase ,dtype=torch.floataa ) else: __SCREAMING_SNAKE_CASE = timesteps.to(device=lowerCamelCase ) # empty dt and derivative __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __SCREAMING_SNAKE_CASE = defaultdict(lowerCamelCase ) def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = np.log(lowerCamelCase ) # get distribution __SCREAMING_SNAKE_CASE = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range __SCREAMING_SNAKE_CASE = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) __SCREAMING_SNAKE_CASE = low_idx + 1 __SCREAMING_SNAKE_CASE = log_sigmas[low_idx] __SCREAMING_SNAKE_CASE = log_sigmas[high_idx] # interpolate sigmas __SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high) __SCREAMING_SNAKE_CASE = np.clip(lowerCamelCase ,0 ,1 ) # transform interpolation to time range __SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx __SCREAMING_SNAKE_CASE = t.reshape(sigma.shape ) return t def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = in_sigmas[-1].item() __SCREAMING_SNAKE_CASE = in_sigmas[0].item() __SCREAMING_SNAKE_CASE = 7.0 # 7.0 is the value used in the paper __SCREAMING_SNAKE_CASE = np.linspace(0 ,1 ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = sigma_min ** (1 / rho) __SCREAMING_SNAKE_CASE = sigma_max ** (1 / rho) __SCREAMING_SNAKE_CASE = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self.dt is None def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase : Union[float, torch.FloatTensor] ,lowerCamelCase : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase : bool = True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase ) # advance index counter by 1 __SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __SCREAMING_SNAKE_CASE = self.sigmas[step_index] __SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1] else: # 2nd order / Heun's 
method __SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1] __SCREAMING_SNAKE_CASE = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_next __SCREAMING_SNAKE_CASE = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_next __SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": __SCREAMING_SNAKE_CASE = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: __SCREAMING_SNAKE_CASE = pred_original_sample.clamp( -self.config.clip_sample_range ,self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __SCREAMING_SNAKE_CASE = sigma_next - sigma_hat # store for 2nd order step __SCREAMING_SNAKE_CASE = derivative __SCREAMING_SNAKE_CASE = dt __SCREAMING_SNAKE_CASE = sample else: # 2. 2nd order / Heun's method __SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_next __SCREAMING_SNAKE_CASE = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample __SCREAMING_SNAKE_CASE = self.dt __SCREAMING_SNAKE_CASE = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCamelCase ) def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase ): # mps does not support float64 __SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) __SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: __SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device ) __SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ) __SCREAMING_SNAKE_CASE = [self.index_for_timestep(lowerCamelCase ,lowerCamelCase ) for t in timesteps] __SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1 ) __SCREAMING_SNAKE_CASE = original_samples + noise * sigma return noisy_samples def __len__( self : Optional[int] ): '''simple docstring''' return self.config.num_train_timesteps
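# This file mirrors diffusers' HeunDiscreteScheduler; a minimal denoising-loop
# sketch against the upstream class, with random tensors standing in for a real
# UNet's latents and noise predictions.
if __name__ == "__main__":
    import torch

    from diffusers import HeunDiscreteScheduler

    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
    scheduler.set_timesteps(num_inference_steps=25)

    sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.randn_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample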
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a = logging.get_logger(__name__) a = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : Dict = 'deta' __UpperCamelCase : List[str] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,): '''simple docstring''' if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) __SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" ) __SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] __SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase ) __SCREAMING_SNAKE_CASE = backbone_config __SCREAMING_SNAKE_CASE = num_queries __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = init_xavier_std __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = auxiliary_loss __SCREAMING_SNAKE_CASE = position_embedding_type # deformable attributes __SCREAMING_SNAKE_CASE = num_feature_levels __SCREAMING_SNAKE_CASE = encoder_n_points __SCREAMING_SNAKE_CASE = decoder_n_points __SCREAMING_SNAKE_CASE = two_stage __SCREAMING_SNAKE_CASE = two_stage_num_proposals __SCREAMING_SNAKE_CASE = with_box_refine __SCREAMING_SNAKE_CASE = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __SCREAMING_SNAKE_CASE = class_cost __SCREAMING_SNAKE_CASE = bbox_cost __SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients __SCREAMING_SNAKE_CASE = mask_loss_coefficient 
__SCREAMING_SNAKE_CASE = dice_loss_coefficient __SCREAMING_SNAKE_CASE = bbox_loss_coefficient __SCREAMING_SNAKE_CASE = giou_loss_coefficient __SCREAMING_SNAKE_CASE = eos_coefficient __SCREAMING_SNAKE_CASE = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase ) @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return self.encoder_attention_heads @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self.d_model def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() __SCREAMING_SNAKE_CASE = self.__class__.model_type return output
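# This mirrors transformers' DetaConfig; a short sketch of the attribute
# aliases and the serialization hook defined in the tail above.
from transformers import DetaConfig

config = DetaConfig(two_stage=True, with_box_refine=True)
print(config.num_attention_heads)  # alias of encoder_attention_heads -> 8
print(config.hidden_size)          # alias of d_model -> 256
config_dict = config.to_dict()     # nests backbone_config as a plain dict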
13
1
'''simple docstring''' import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=5 ) -> int: '''simple docstring''' assert masked_input.count("""<mask>""" ) == 1 __SCREAMING_SNAKE_CASE = torch.tensor(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) ).unsqueeze(0 ) # Batch size 1 __SCREAMING_SNAKE_CASE = model(__UpperCAmelCase )[0] # The last hidden-state is the first element of the output tuple __SCREAMING_SNAKE_CASE = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() __SCREAMING_SNAKE_CASE = logits[0, masked_index, :] __SCREAMING_SNAKE_CASE = logits.softmax(dim=0 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = prob.topk(k=__UpperCAmelCase , dim=0 ) __SCREAMING_SNAKE_CASE = """ """.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__UpperCAmelCase ) )] ) __SCREAMING_SNAKE_CASE = tokenizer.mask_token __SCREAMING_SNAKE_CASE = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ): __SCREAMING_SNAKE_CASE = predicted_token_bpe.replace("""\u2581""" , """ """ ) if " {0}".format(__UpperCAmelCase ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(""" {0}""".format(__UpperCAmelCase ) , __UpperCAmelCase ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(__UpperCAmelCase , __UpperCAmelCase ), values[index].item(), predicted_token, ) ) return topk_filled_outputs a = CamembertTokenizer.from_pretrained("camembert-base") a = CamembertForMaskedLM.from_pretrained("camembert-base") model.eval() a = "Le camembert est <mask> :)" print(fill_mask(masked_input, model, tokenizer, topk=3))
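# Equivalent one-liner (hedged): the manual top-k filling above is what the
# public fill-mask pipeline does internally; the pipeline API below is real,
# though the exact predictions vary with the checkpoint version.
from transformers import pipeline

camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
for pred in camembert_fill_mask("Le camembert est <mask> :)", top_k=3):
    print(pred["token_str"], round(pred["score"], 4))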
13
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : List[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape __SCREAMING_SNAKE_CASE = jax.image.resize( lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,) __SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : List[str] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : int = None __UpperCamelCase : float = 0.0 __UpperCamelCase : bool = None __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels __SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype ) __SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob ) __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __SCREAMING_SNAKE_CASE = None if use_nin_shortcut: __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,) def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ): '''simple docstring''' __SCREAMING_SNAKE_CASE = hidden_states __SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) ) __SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 ) __SCREAMING_SNAKE_CASE = hidden_states + temb __SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase ) if self.conv_shortcut is not None: __SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase ) return hidden_states + residual
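# Standalone check (hedged): the upsample block above is nearest-neighbour
# resizing followed by a 3x3 convolution; this snippet exercises just the
# resize step on an NHWC tensor.
import jax
import jax.numpy as jnp

x = jnp.ones((1, 8, 8, 3))  # (batch, height, width, channels)
up = jax.image.resize(x, shape=(1, 16, 16, 3), method="nearest")
assert up.shape == (1, 16, 16, 3)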
13
1
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING a = logging.get_logger(__name__) @add_end_docstrings(_snake_case ) class __a ( _snake_case ): def __init__( self : Any ,**lowerCamelCase : Any ): '''simple docstring''' super().__init__(**lowerCamelCase ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self ,"""vision""" ) self.check_model_type(lowerCamelCase ) def __call__( self : List[str] ,lowerCamelCase : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowerCamelCase : Union[str, List[str]] = None ,**lowerCamelCase : Optional[Any] ,): '''simple docstring''' if "text_queries" in kwargs: __SCREAMING_SNAKE_CASE = kwargs.pop("""text_queries""" ) if isinstance(lowerCamelCase ,(str, Image.Image) ): __SCREAMING_SNAKE_CASE = {"""image""": image, """candidate_labels""": candidate_labels} else: __SCREAMING_SNAKE_CASE = image __SCREAMING_SNAKE_CASE = super().__call__(lowerCamelCase ,**lowerCamelCase ) return results def UpperCAmelCase__ ( self : Any ,**lowerCamelCase : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} if "threshold" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""threshold"""] if "top_k" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""top_k"""] return {}, {}, postprocess_params def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_image(inputs["""image"""] ) __SCREAMING_SNAKE_CASE = inputs["""candidate_labels"""] if isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = candidate_labels.split(""",""" ) __SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa ) for i, candidate_label in enumerate(lowerCamelCase ): __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.image_processor(lowerCamelCase ,return_tensors=self.framework ) yield { "is_last": i == len(lowerCamelCase ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = model_inputs.pop("""target_size""" ) __SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_label""" ) __SCREAMING_SNAKE_CASE = model_inputs.pop("""is_last""" ) __SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ) __SCREAMING_SNAKE_CASE = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Tuple ,lowerCamelCase : Any=0.1 ,lowerCamelCase : Optional[int]=None ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for model_output in model_outputs: __SCREAMING_SNAKE_CASE = model_output["""candidate_label"""] __SCREAMING_SNAKE_CASE = BaseModelOutput(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection( outputs=lowerCamelCase ,threshold=lowerCamelCase ,target_sizes=model_output["""target_size"""] )[0] for index in 
outputs["scores"].nonzero(): __SCREAMING_SNAKE_CASE = outputs["""scores"""][index].item() __SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs["""boxes"""][index][0] ) __SCREAMING_SNAKE_CASE = {"""score""": score, """label""": label, """box""": box} results.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = sorted(lowerCamelCase ,key=lambda lowerCamelCase : x["score"] ,reverse=lowerCamelCase ) if top_k: __SCREAMING_SNAKE_CASE = results[:top_k] return results def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : "torch.Tensor" ): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = box.int().tolist() __SCREAMING_SNAKE_CASE = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
13
'''simple docstring''' import sys from collections import defaultdict class __a : def __init__( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ): '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pos def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: __SCREAMING_SNAKE_CASE = 2 * start + 2 if heap[smallest_child] < heap[start]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( heap[start], positions[start], ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa __SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,lowerCamelCase ) self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = position[index] while index != 0: __SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __SCREAMING_SNAKE_CASE = heap[parent] __SCREAMING_SNAKE_CASE = position[parent] self.set_position(position[parent] ,lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,lowerCamelCase ) break __SCREAMING_SNAKE_CASE = parent else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,0 ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1 for i in range(lowerCamelCase ,-1 ,-1 ): self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = positions[0] __SCREAMING_SNAKE_CASE = sys.maxsize self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase ) return temp def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = Heap() __SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex __SCREAMING_SNAKE_CASE = [] for vertex in range(len(__UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__UpperCAmelCase ) heap.node_position.append(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = sys.maxsize for neighbor, distance in adjacency_list[0]: __SCREAMING_SNAKE_CASE = 0 
__SCREAMING_SNAKE_CASE = distance heap.heapify(__UpperCAmelCase , __UpperCAmelCase ) for _ in range(1 , len(__UpperCAmelCase ) ): __SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __SCREAMING_SNAKE_CASE = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__UpperCAmelCase )] ): __SCREAMING_SNAKE_CASE = distance heap.bottom_to_top( __UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > a = int(input("Enter number of edges: ").strip()) a = defaultdict(list) for _ in range(edges_number): a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
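# Example input (hedged): the script above reads edges from stdin; the same
# graph can be written directly as the adjacency list it builds, where vertex
# v maps to [neighbor, weight] pairs. `prisms_algorithm` is the name the
# __main__ block uses for the function defined above.
example_graph = {
    0: [[1, 1], [3, 3]],
    1: [[0, 1], [2, 6], [3, 5], [4, 1]],
    2: [[1, 6], [4, 5], [5, 2]],
    3: [[0, 3], [1, 5], [4, 1]],
    4: [[1, 1], [3, 1], [2, 5], [5, 4]],
    5: [[2, 2], [4, 4]],
}
# One valid minimum spanning tree for this graph has total weight 9, using
# the edges 0-1, 1-4, 4-3, 4-5 and 5-2.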
13
1
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def __magic_name__ ( __UpperCAmelCase ) -> List[str]: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' if args.student_type == "roberta": __SCREAMING_SNAKE_CASE = False elif args.student_type == "gpt2": __SCREAMING_SNAKE_CASE = False def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' if args.student_type == "roberta": __SCREAMING_SNAKE_CASE = False def __magic_name__ ( ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""Training""" ) parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" ) parser.add_argument( """--dump_path""" , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""The output directory (log, checkpoints, parameters, etc.)""" ) parser.add_argument( """--data_file""" , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , ) parser.add_argument( """--student_type""" , type=__UpperCAmelCase , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__UpperCAmelCase , help="""The student type (DistilBERT, RoBERTa).""" , ) parser.add_argument("""--student_config""" , type=__UpperCAmelCase , required=__UpperCAmelCase , help="""Path to the student configuration.""" ) parser.add_argument( """--student_pretrained_weights""" , default=__UpperCAmelCase , type=__UpperCAmelCase , help="""Load student initialization checkpoint.""" ) parser.add_argument( """--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__UpperCAmelCase , help="""Teacher type (BERT, RoBERTa).""" ) parser.add_argument("""--teacher_name""" , 
type=__UpperCAmelCase , required=__UpperCAmelCase , help="""The teacher model.""" ) parser.add_argument("""--temperature""" , default=2.0 , type=__UpperCAmelCase , help="""Temperature for the softmax temperature.""" ) parser.add_argument( """--alpha_ce""" , default=0.5 , type=__UpperCAmelCase , help="""Linear weight for the distillation loss. Must be >=0.""" ) parser.add_argument( """--alpha_mlm""" , default=0.0 , type=__UpperCAmelCase , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , ) parser.add_argument("""--alpha_clm""" , default=0.5 , type=__UpperCAmelCase , help="""Linear weight for the CLM loss. Must be >=0.""" ) parser.add_argument("""--alpha_mse""" , default=0.0 , type=__UpperCAmelCase , help="""Linear weight of the MSE loss. Must be >=0.""" ) parser.add_argument( """--alpha_cos""" , default=0.0 , type=__UpperCAmelCase , help="""Linear weight of the cosine embedding loss. Must be >=0.""" ) parser.add_argument( """--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" ) parser.add_argument( """--mlm_mask_prop""" , default=0.1_5 , type=__UpperCAmelCase , help="""Proportion of tokens for which we need to make a prediction.""" , ) parser.add_argument("""--word_mask""" , default=0.8 , type=__UpperCAmelCase , help="""Proportion of tokens to mask out.""" ) parser.add_argument("""--word_keep""" , default=0.1 , type=__UpperCAmelCase , help="""Proportion of tokens to keep.""" ) parser.add_argument("""--word_rand""" , default=0.1 , type=__UpperCAmelCase , help="""Proportion of tokens to randomly replace.""" ) parser.add_argument( """--mlm_smoothing""" , default=0.7 , type=__UpperCAmelCase , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , ) parser.add_argument("""--token_counts""" , type=__UpperCAmelCase , help="""The token counts in the data_file for MLM.""" ) parser.add_argument( """--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , ) parser.add_argument( """--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , ) parser.add_argument( """--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , ) parser.add_argument("""--n_epoch""" , type=__UpperCAmelCase , default=3 , help="""Number of pass on the whole dataset.""" ) parser.add_argument("""--batch_size""" , type=__UpperCAmelCase , default=5 , help="""Batch size (for each process).""" ) parser.add_argument( """--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. 
Default is true.""" , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__UpperCAmelCase , default=50 , help="""Gradient accumulation for larger training batches.""" , ) parser.add_argument("""--warmup_prop""" , default=0.0_5 , type=__UpperCAmelCase , help="""Linear warmup proportion.""" ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__UpperCAmelCase , help="""Weight decay if we apply some.""" ) parser.add_argument("""--learning_rate""" , default=5e-4 , type=__UpperCAmelCase , help="""The initial learning rate for Adam.""" ) parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=__UpperCAmelCase , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__UpperCAmelCase , help="""Max gradient norm.""" ) parser.add_argument("""--initializer_range""" , default=0.0_2 , type=__UpperCAmelCase , help="""Random initialization range.""" ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=__UpperCAmelCase , default="""O1""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_gpu""" , type=__UpperCAmelCase , default=1 , help="""Number of GPUs in the node.""" ) parser.add_argument("""--local_rank""" , type=__UpperCAmelCase , default=-1 , help="""Distributed training - Local rank""" ) parser.add_argument("""--seed""" , type=__UpperCAmelCase , default=56 , help="""Random seed""" ) parser.add_argument("""--log_interval""" , type=__UpperCAmelCase , default=500 , help="""Tensorboard logging interval.""" ) parser.add_argument("""--checkpoint_interval""" , type=__UpperCAmelCase , default=4000 , help="""Checkpoint interval.""" ) __SCREAMING_SNAKE_CASE = parser.parse_args() sanity_checks(__UpperCAmelCase ) # ARGS # init_gpu_params(__UpperCAmelCase ) set_seed(__UpperCAmelCase ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite""" """ itUse `--force` if you want to overwrite it""" ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" ) # SAVE PARAMS # logger.info(f"""Param: {args}""" ) with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f: json.dump(vars(__UpperCAmelCase ) , __UpperCAmelCase , indent=4 ) git_log(args.dump_path ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[args.student_type] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = MODEL_CLASSES[args.teacher_type] # TOKENIZER # __SCREAMING_SNAKE_CASE = teacher_tokenizer_class.from_pretrained(args.teacher_name ) __SCREAMING_SNAKE_CASE = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): __SCREAMING_SNAKE_CASE = tokenizer.all_special_tokens.index(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = tokenizer.all_special_ids[idx] logger.info(f"""Special tokens {special_tok_ids}""" ) __SCREAMING_SNAKE_CASE = special_tok_ids __SCREAMING_SNAKE_CASE = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"""Loading data from {args.data_file}""" ) with open(args.data_file , """rb""" ) as 
fp: __SCREAMING_SNAKE_CASE = pickle.load(__UpperCAmelCase ) if args.mlm: logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" ) with open(args.token_counts , """rb""" ) as fp: __SCREAMING_SNAKE_CASE = pickle.load(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = np.maximum(__UpperCAmelCase , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): __SCREAMING_SNAKE_CASE = 0.0 # do not predict special tokens __SCREAMING_SNAKE_CASE = torch.from_numpy(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = LmSeqsDataset(params=__UpperCAmelCase , data=__UpperCAmelCase ) logger.info("""Data loader created.""" ) # STUDENT # logger.info(f"""Loading student config from {args.student_config}""" ) __SCREAMING_SNAKE_CASE = student_config_class.from_pretrained(args.student_config ) __SCREAMING_SNAKE_CASE = True if args.student_pretrained_weights is not None: logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" ) __SCREAMING_SNAKE_CASE = student_model_class.from_pretrained(args.student_pretrained_weights , config=__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = student_model_class(__UpperCAmelCase ) if args.n_gpu > 0: student.to(f"""cuda:{args.local_rank}""" ) logger.info("""Student loaded.""" ) # TEACHER # __SCREAMING_SNAKE_CASE = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__UpperCAmelCase ) if args.n_gpu > 0: teacher.to(f"""cuda:{args.local_rank}""" ) logger.info(f"""Teacher loaded from {args.teacher_name}.""" ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__UpperCAmelCase , __UpperCAmelCase ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__UpperCAmelCase , __UpperCAmelCase ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() __SCREAMING_SNAKE_CASE = Distiller( params=__UpperCAmelCase , dataset=__UpperCAmelCase , token_probs=__UpperCAmelCase , student=__UpperCAmelCase , teacher=__UpperCAmelCase ) distiller.train() logger.info("""Let's go get some drinks.""" ) if __name__ == "__main__": main()
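# Invocation sketch (hedged): an MLM (BERT -> DistilBERT) run that satisfies
# the sanity checks above; every path is a placeholder, and the flag values
# follow the example given in the upstream distillation README.
#
#   python train.py \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 --mlm \
#       --freeze_pos_embs \
#       --dump_path serialization_dir/my_first_training \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --force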
13
'''simple docstring''' import os import string import sys a = 1 << 8 a = { "tab": ord("\t"), "newline": ord("\r"), "esc": 27, "up": 65 + ARROW_KEY_FLAG, "down": 66 + ARROW_KEY_FLAG, "right": 67 + ARROW_KEY_FLAG, "left": 68 + ARROW_KEY_FLAG, "mod_int": 91, "undefined": sys.maxsize, "interrupt": 3, "insert": 50, "delete": 51, "pg_up": 53, "pg_down": 54, } a = KEYMAP["up"] a = KEYMAP["left"] if sys.platform == "win32": a = [] a = { b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG, b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG, b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG, b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG, b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG, b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG, b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG, b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG, } for i in range(10): a = ord(str(i)) def __magic_name__ ( ) -> Union[str, Any]: '''simple docstring''' if os.name == "nt": import msvcrt __SCREAMING_SNAKE_CASE = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(__UpperCAmelCase ) == 0: # Read the keystroke __SCREAMING_SNAKE_CASE = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): __SCREAMING_SNAKE_CASE = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: __SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(__UpperCAmelCase ) if ord(__UpperCAmelCase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) __SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] ) except KeyError: __SCREAMING_SNAKE_CASE = cha[1] else: __SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty __SCREAMING_SNAKE_CASE = sys.stdin.fileno() __SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase ) try: tty.setraw(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = sys.stdin.read(1 ) finally: termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase ) return ch def __magic_name__ ( ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(__UpperCAmelCase ) == KEYMAP["esc"]: __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]: __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
13
1
'''simple docstring''' from collections import namedtuple import requests from lxml import html # type: ignore a = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( __UpperCAmelCase = "https://www.worldometers.info/coronavirus/" ) -> covid_data: '''simple docstring''' __SCREAMING_SNAKE_CASE = """//div[@class = \"maincounter-number\"]/span/text()""" return covid_data(*html.fromstring(requests.get(__UpperCAmelCase ).content ).xpath(__UpperCAmelCase ) ) a = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
13
'''simple docstring''' from __future__ import annotations import bisect def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' if hi < 0: __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) while lo < hi: __SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __SCREAMING_SNAKE_CASE = mid + 1 else: __SCREAMING_SNAKE_CASE = mid return lo def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' if hi < 0: __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) while lo < hi: __SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __SCREAMING_SNAKE_CASE = mid + 1 else: __SCREAMING_SNAKE_CASE = mid return lo def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1 while left <= right: __SCREAMING_SNAKE_CASE = left + (right - left) // 2 __SCREAMING_SNAKE_CASE = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __SCREAMING_SNAKE_CASE = midpoint - 1 else: __SCREAMING_SNAKE_CASE = midpoint + 1 return None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' __SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase ) if index != len(__UpperCAmelCase ) and sorted_collection[index] == item: return index return None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' if right < left: return None __SCREAMING_SNAKE_CASE = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 ) else: return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase ) if __name__ == "__main__": a = input("Enter numbers separated by comma:\n").strip() a = sorted(int(item) for item in user_input.split(",")) a = int(input("Enter a single number to be found in the list:\n")) a = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
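# Quick check (hedged): the hand-rolled bisect_left / bisect_right above are
# meant to match the standard library's boundary semantics on duplicates.
import bisect

data = [0, 5, 7, 7, 10, 15]
assert bisect.bisect_left(data, 7) == 2   # insertion point before the first 7
assert bisect.bisect_right(data, 7) == 4  # insertion point after the last 7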
13
1
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a = list[list[float | int]] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix: '''simple docstring''' __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(__UpperCAmelCase ): for col in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , __UpperCAmelCase ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , __UpperCAmelCase ): for row in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(__UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase ) ] def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]: '''simple docstring''' __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(__UpperCAmelCase ): for col in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase ) def interpolated_func(__UpperCAmelCase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__UpperCAmelCase ) ) return interpolated_func def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ): x_val += 1 ret += poly(__UpperCAmelCase ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
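# Worked example (hedged): the Gaussian-elimination routine above, called
# `solve` at its use site, reduces an augmented matrix and back-substitutes.
# For the system x + y = 3, x - y = 1 the call and result would be:
#     solve([[1, 1], [1, -1]], [[3], [1]])  ->  [[2.0], [1.0]]
# since x = 2, y = 1 (values are rounded to 10 decimal places by the solver).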
13
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging a = logging.get_logger(__name__) class __a ( _snake_case ): __UpperCamelCase : int = 'linear' __UpperCamelCase : Tuple = 'cosine' __UpperCamelCase : Tuple = 'cosine_with_restarts' __UpperCamelCase : List[Any] = 'polynomial' __UpperCamelCase : Optional[Any] = 'constant' __UpperCamelCase : Optional[int] = 'constant_with_warmup' __UpperCamelCase : List[Any] = 'piecewise_constant' def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) ) return 1.0 return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" ) __SCREAMING_SNAKE_CASE = int(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = float(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = value __SCREAMING_SNAKE_CASE = float(rule_list[-1] ) def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ): def rule_func(__UpperCAmelCase ) -> float: __SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__UpperCAmelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func __SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) 
) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" ) def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: __SCREAMING_SNAKE_CASE = lr_init - lr_end __SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps __SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps __SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) a = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
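# Usage sketch (hedged): these factories mirror the public `get_scheduler`
# helper in diffusers/transformers; the wiring to a PyTorch optimizer is
# shown below, with the scheduler call left commented because the factory
# above is defined under an obfuscated name.
import torch

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)
# scheduler = get_scheduler("linear", optimizer=optimizer,
#                           num_warmup_steps=100, num_training_steps=1_000)
# for _ in range(1_000):
#     optimizer.step()
#     scheduler.step()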
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): __SCREAMING_SNAKE_CASE = f"""Input value of [number={number}] must be an integer""" raise TypeError(__UpperCAmelCase ) if number < 1: __SCREAMING_SNAKE_CASE = f"""Input value of [number={number}] must be > 0""" raise ValueError(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = 1 for i in range(1 , __UpperCAmelCase ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
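# Restated check (hedged): the loop above applies the Catalan recurrence
# C(n) = C(n-1) * (4n - 2) // (n + 1), so calling it with `number` returns
# the (number - 1)-th Catalan number. A self-contained restatement:
def catalan(number: int) -> int:
    current = 1
    for i in range(1, number):
        current = current * (4 * i - 2) // (i + 1)
    return current

assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]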
13
'''simple docstring''' import requests from bs4 import BeautifulSoup def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}""" __SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" ) __SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)""" return soup.find("""div""" , class_=class_ ).find("""span""" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
13
1
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } a = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } a = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = set() __SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __SCREAMING_SNAKE_CASE = char __SCREAMING_SNAKE_CASE = set(__UpperCAmelCase ) return pairs class __a ( _snake_case ): __UpperCamelCase : List[Any] = VOCAB_FILES_NAMES __UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[str] ,lowerCamelCase : Tuple="__start__" ,lowerCamelCase : Tuple="__end__" ,lowerCamelCase : Dict="__unk__" ,lowerCamelCase : str="__null__" ,**lowerCamelCase : List[Any] ,): '''simple docstring''' super().__init__(unk_token=lowerCamelCase ,bos_token=lowerCamelCase ,eos_token=lowerCamelCase ,pad_token=lowerCamelCase ,**lowerCamelCase ) with open(lowerCamelCase ,encoding="""utf-8""" ) as vocab_handle: __SCREAMING_SNAKE_CASE = json.load(lowerCamelCase ) __SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} with open(lowerCamelCase ,encoding="""utf-8""" ) as merges_handle: __SCREAMING_SNAKE_CASE = merges_handle.read().split("""\n""" )[1:-1] __SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in merges] __SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase ,range(len(lowerCamelCase ) ) ) ) __SCREAMING_SNAKE_CASE = {} @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' return len(self.encoder ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : str ): '''simple docstring''' if token in self.cache: return self.cache[token] __SCREAMING_SNAKE_CASE = re.sub("""([.,!?()])""" ,r""" \1""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = re.sub("""(')""" ,r""" \1 """ ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = re.sub(r"""\s{2,}""" ,""" """ ,lowerCamelCase ) if "\n" in token: __SCREAMING_SNAKE_CASE = token.replace("""\n""" ,""" __newln__""" ) __SCREAMING_SNAKE_CASE = token.split(""" """ ) __SCREAMING_SNAKE_CASE = [] for token in tokens: if not len(lowerCamelCase ): continue __SCREAMING_SNAKE_CASE = token.lower() __SCREAMING_SNAKE_CASE = tuple(lowerCamelCase ) __SCREAMING_SNAKE_CASE = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) __SCREAMING_SNAKE_CASE = get_pairs(lowerCamelCase ) if not pairs: words.append(lowerCamelCase ) continue while True: __SCREAMING_SNAKE_CASE = min(lowerCamelCase ,key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = bigram __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 while i < len(lowerCamelCase ): try: __SCREAMING_SNAKE_CASE = word.index(lowerCamelCase ,lowerCamelCase ) new_word.extend(word[i:j] ) __SCREAMING_SNAKE_CASE = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __SCREAMING_SNAKE_CASE = tuple(lowerCamelCase ) __SCREAMING_SNAKE_CASE = new_word if len(lowerCamelCase ) == 1: break else: __SCREAMING_SNAKE_CASE = get_pairs(lowerCamelCase ) __SCREAMING_SNAKE_CASE = """@@ """.join(lowerCamelCase ) __SCREAMING_SNAKE_CASE = word[:-4] __SCREAMING_SNAKE_CASE = word words.append(lowerCamelCase ) return " ".join(lowerCamelCase ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = re.findall(r"""\S+\n?""" ,lowerCamelCase ) for token in words: split_tokens.extend(list(self.bpe(lowerCamelCase ).split(""" """ ) ) ) return split_tokens def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = token.lower() return self.encoder.get(lowerCamelCase ,self.encoder.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : int ): '''simple docstring''' return self.decoder.get(lowerCamelCase ,self.unk_token ) def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """ """.join(lowerCamelCase ).replace("""@@ """ ,"""""" ).strip() return out_string def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : str ,lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __SCREAMING_SNAKE_CASE = os.path.join( lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __SCREAMING_SNAKE_CASE = os.path.join( lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase ,ensure_ascii=lowerCamelCase ) + """\n""" ) __SCREAMING_SNAKE_CASE = 0 with open(lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) __SCREAMING_SNAKE_CASE = token_index writer.write(""" """.join(lowerCamelCase ) + """\n""" ) index += 1 return vocab_file, merge_file
13
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase ) -> bool: '''simple docstring''' if num < 0: return False __SCREAMING_SNAKE_CASE = num __SCREAMING_SNAKE_CASE = 0 while num > 0: __SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10) num //= 10 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
13
1
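The snippet above tests whether an integer is a palindrome by reversing its digits arithmetically. A cleaned-up sketch of the same idea, with a few quick checks:

# Digit-reversal palindrome check, mirroring the function above.
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False  # negative numbers are never palindromes here
    original, reversed_num = num, 0
    while num > 0:
        reversed_num = reversed_num * 10 + num % 10
        num //= 10
    return original == reversed_num

assert is_palindrome(121)
assert not is_palindrome(123)
assert is_palindrome(0)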
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json", "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json", "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json", "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json", "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json", "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json", "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json", "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json", "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json", "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : List[Any] = 'rwkv' __UpperCamelCase : List[str] = {'max_position_embeddings': 'context_length'} def __init__( self : Union[str, Any] ,lowerCamelCase : Optional[Any]=5_0277 ,lowerCamelCase : int=1024 ,lowerCamelCase : Any=4096 ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Optional[int]=1E-5 ,lowerCamelCase : List[Any]=0 ,lowerCamelCase : str=0 ,lowerCamelCase : List[Any]=6 ,lowerCamelCase : List[str]=False ,lowerCamelCase : Dict=True ,**lowerCamelCase : List[Any] ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = context_length __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = attention_hidden_size if attention_hidden_size is not None else hidden_size __SCREAMING_SNAKE_CASE = intermediate_size if intermediate_size is not None else 4 * hidden_size __SCREAMING_SNAKE_CASE = layer_norm_epsilon __SCREAMING_SNAKE_CASE = rescale_every __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = eos_token_id super().__init__( tie_word_embeddings=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
13
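The RWKV config above derives `attention_hidden_size` and `intermediate_size` from `hidden_size` when they are not supplied. A plain-Python sketch of that defaulting pattern, independent of transformers (the class name is a stand-in):

# Sketch of the fall-back defaults used by the config above (plain Python,
# independent of transformers): unspecified sizes are derived from hidden_size.
class RwkvLikeConfig:
    def __init__(self, hidden_size=4096, attention_hidden_size=None, intermediate_size=None):
        self.hidden_size = hidden_size
        self.attention_hidden_size = (
            attention_hidden_size if attention_hidden_size is not None else hidden_size
        )
        self.intermediate_size = (
            intermediate_size if intermediate_size is not None else 4 * hidden_size
        )

cfg = RwkvLikeConfig(hidden_size=1024)
print(cfg.attention_hidden_size, cfg.intermediate_size)  # 1024 4096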
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a = list[list[float | int]] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix: '''simple docstring''' __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(__UpperCAmelCase ): for col in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , __UpperCAmelCase ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , __UpperCAmelCase ): for row in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(__UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase ) ] def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]: '''simple docstring''' __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(__UpperCAmelCase ): for col in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase ) def interpolated_func(__UpperCAmelCase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__UpperCAmelCase ) ) return interpolated_func def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ): x_val += 1 ret += poly(__UpperCAmelCase ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
13
1
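The solver above performs forward elimination with partial pivoting followed by back substitution. A compact runnable sketch on a small dense system (toy coefficients; float arithmetic, no handling of singular matrices):

# Gaussian elimination with partial pivoting on an augmented matrix [A | b].
# Toy 3x3 system; no special handling of singular matrices.
def solve(matrix, vector):
    size = len(matrix)
    aug = [row[:] + [v] for row, v in zip(matrix, vector)]
    for col in range(size):
        # Partial pivoting: swap in the row with the largest entry in this column.
        pivot = max(range(col, size), key=lambda r: abs(aug[r][col]))
        aug[col], aug[pivot] = aug[pivot], aug[col]
        for r in range(col + 1, size):
            ratio = aug[r][col] / aug[col][col]
            for c in range(col, size + 1):
                aug[r][c] -= aug[col][c] * ratio
    # Back substitution, from the last row upward.
    x = [0.0] * size
    for row in reversed(range(size)):
        x[row] = (aug[row][size] - sum(aug[row][c] * x[c] for c in range(row + 1, size))) / aug[row][row]
    return x

print(solve([[2.0, 1.0, -1.0], [-3.0, -1.0, 2.0], [-2.0, 1.0, 2.0]], [8.0, -11.0, -3.0]))
# ~[2.0, 3.0, -1.0]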
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
13
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a = logging.get_logger(__name__) @add_end_docstrings(_snake_case ) class __a ( _snake_case ): def __init__( self : Union[str, Any] ,**lowerCamelCase : str ): '''simple docstring''' super().__init__(**lowerCamelCase ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ): '''simple docstring''' return super().__call__(lowerCamelCase ,**lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_image(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(lowerCamelCase ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" ) __SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" ) __SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda x : -x[0] ) ] return result
13
1
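The pipeline's postprocess step softmaxes the per-label image-text logits and sorts labels by score. A minimal numpy sketch of that step; the logits here are made up, where a real pipeline would take them from the model:

# Softmax over candidate labels followed by a descending sort, as in postprocess.
# The logits are made up for illustration; a real pipeline gets them from the model.
import numpy as np

def rank_labels(logits, candidate_labels):
    exp = np.exp(logits - logits.max())  # subtract max for numerical stability
    probs = exp / exp.sum()
    ranked = sorted(zip(probs.tolist(), candidate_labels), key=lambda pair: -pair[0])
    return [{"score": score, "label": label} for score, label in ranked]

print(rank_labels(np.array([2.0, 0.5, -1.0]), ["cat", "dog", "car"]))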
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __a ( _snake_case, _snake_case, _snake_case ): __UpperCamelCase : List[Any] = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias'] @register_to_config def __init__( self : List[str] ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : int = 5_0257 ,lowerCamelCase : int = 1024 ,lowerCamelCase : int = 768 ,lowerCamelCase : int = 12 ,lowerCamelCase : int = 12 ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : str = "gelu_new" ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 1E-5 ,lowerCamelCase : float = 0.02 ,lowerCamelCase : bool = True ,lowerCamelCase : bool = True ,lowerCamelCase : bool = False ,lowerCamelCase : bool = False ,): '''simple docstring''' super().__init__() __SCREAMING_SNAKE_CASE = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and""" f""" `n_embd`: {n_embd} are not equal.""" ) __SCREAMING_SNAKE_CASE = prefix_inner_dim __SCREAMING_SNAKE_CASE = prefix_hidden_dim __SCREAMING_SNAKE_CASE = ( nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) __SCREAMING_SNAKE_CASE = ( nn.Linear(self.prefix_hidden_dim ,lowerCamelCase ) if self.prefix_hidden_dim is not None else nn.Identity() ) __SCREAMING_SNAKE_CASE = GPTaConfig( vocab_size=lowerCamelCase ,n_positions=lowerCamelCase ,n_embd=lowerCamelCase ,n_layer=lowerCamelCase ,n_head=lowerCamelCase ,n_inner=lowerCamelCase ,activation_function=lowerCamelCase ,resid_pdrop=lowerCamelCase ,embd_pdrop=lowerCamelCase ,attn_pdrop=lowerCamelCase ,layer_norm_epsilon=lowerCamelCase ,initializer_range=lowerCamelCase ,scale_attn_weights=lowerCamelCase ,use_cache=lowerCamelCase ,scale_attn_by_inverse_layer_idx=lowerCamelCase ,reorder_and_upcast_attn=lowerCamelCase ,) __SCREAMING_SNAKE_CASE = GPTaLMHeadModel(lowerCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : torch.Tensor ,lowerCamelCase : torch.Tensor ,lowerCamelCase : Optional[torch.Tensor] = None ,lowerCamelCase : Optional[torch.Tensor] = None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.encode_prefix(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.decode_prefix(lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.cat((prefix_embeds, embedding_text) ,dim=1 ) if labels is not None: __SCREAMING_SNAKE_CASE = self.get_dummy_token(input_ids.shape[0] ,input_ids.device ) __SCREAMING_SNAKE_CASE = torch.cat((dummy_token, input_ids) ,dim=1 ) __SCREAMING_SNAKE_CASE = self.transformer(inputs_embeds=lowerCamelCase ,labels=lowerCamelCase ,attention_mask=lowerCamelCase ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def UpperCAmelCase__ ( self : int ,lowerCamelCase : int ,lowerCamelCase : torch.device ): '''simple docstring''' return torch.zeros(lowerCamelCase ,self.prefix_length ,dtype=torch.intaa ,device=lowerCamelCase ) def UpperCAmelCase__ ( self : str ,lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.encode_prefix(lowerCamelCase ) 
@torch.no_grad() def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.split(lowerCamelCase ,1 ,dim=0 ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for feature in features: __SCREAMING_SNAKE_CASE = self.decode_prefix(feature.to(lowerCamelCase ) ) # back to the clip feature # Only support beam search for now __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.generate_beam( input_embeds=lowerCamelCase ,device=lowerCamelCase ,eos_token_id=lowerCamelCase ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) __SCREAMING_SNAKE_CASE = torch.stack(lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.stack(lowerCamelCase ) return generated_tokens, generated_seq_lengths @torch.no_grad() def UpperCAmelCase__ ( self : str ,lowerCamelCase : int=None ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : List[str]=None ,lowerCamelCase : int = 5 ,lowerCamelCase : int = 67 ,lowerCamelCase : float = 1.0 ,lowerCamelCase : Optional[int] = None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = torch.ones(lowerCamelCase ,device=lowerCamelCase ,dtype=torch.int ) __SCREAMING_SNAKE_CASE = torch.zeros(lowerCamelCase ,device=lowerCamelCase ,dtype=torch.bool ) if input_embeds is not None: __SCREAMING_SNAKE_CASE = input_embeds else: __SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(lowerCamelCase ) for i in range(lowerCamelCase ): __SCREAMING_SNAKE_CASE = self.transformer(inputs_embeds=lowerCamelCase ) __SCREAMING_SNAKE_CASE = outputs.logits __SCREAMING_SNAKE_CASE = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __SCREAMING_SNAKE_CASE = logits.softmax(-1 ).log() if scores is None: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = logits.topk(lowerCamelCase ,-1 ) __SCREAMING_SNAKE_CASE = generated.expand(lowerCamelCase ,*generated.shape[1:] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = next_tokens.permute(1 ,0 ), scores.squeeze(0 ) if tokens is None: __SCREAMING_SNAKE_CASE = next_tokens else: __SCREAMING_SNAKE_CASE = tokens.expand(lowerCamelCase ,*tokens.shape[1:] ) __SCREAMING_SNAKE_CASE = torch.cat((tokens, next_tokens) ,dim=1 ) else: __SCREAMING_SNAKE_CASE = -float(np.inf ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __SCREAMING_SNAKE_CASE = scores_sum / seq_lengths[:, None] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = scores_sum_average.view(-1 ).topk(lowerCamelCase ,-1 ) __SCREAMING_SNAKE_CASE = next_tokens // scores_sum.shape[1] __SCREAMING_SNAKE_CASE = seq_lengths[next_tokens_source] __SCREAMING_SNAKE_CASE = next_tokens % scores_sum.shape[1] __SCREAMING_SNAKE_CASE = next_tokens.unsqueeze(1 ) __SCREAMING_SNAKE_CASE = tokens[next_tokens_source] __SCREAMING_SNAKE_CASE = torch.cat((tokens, next_tokens) ,dim=1 ) __SCREAMING_SNAKE_CASE = generated[next_tokens_source] __SCREAMING_SNAKE_CASE = scores_sum_average * seq_lengths __SCREAMING_SNAKE_CASE = is_stopped[next_tokens_source] __SCREAMING_SNAKE_CASE = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 ) __SCREAMING_SNAKE_CASE = torch.cat((generated, next_token_embed) ,dim=1 ) __SCREAMING_SNAKE_CASE = is_stopped + next_tokens.eq(lowerCamelCase ).squeeze() if is_stopped.all(): break __SCREAMING_SNAKE_CASE = scores / seq_lengths __SCREAMING_SNAKE_CASE = 
scores.argsort(descending=lowerCamelCase ) # tokens tensors are already padded to max_seq_length __SCREAMING_SNAKE_CASE = [tokens[i] for i in order] __SCREAMING_SNAKE_CASE = torch.stack(lowerCamelCase ,dim=0 ) __SCREAMING_SNAKE_CASE = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype ) return output_texts, seq_lengths
13
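generate_beam above keeps the best partial sequences ranked by length-normalized log-probability. A toy beam search with a fixed next-token distribution (standing in for GPT-2) illustrates the bookkeeping:

# Toy beam search: at each step keep the `beam_size` sequences with the highest
# cumulative log-probability. The "model" is a fixed distribution, standing in for GPT-2.
import math

def toy_model(sequence):
    # Pretend log-probabilities over a 3-token vocabulary, independent of history.
    return [math.log(0.6), math.log(0.3), math.log(0.1)]

def beam_search(steps=3, beam_size=2):
    beams = [([], 0.0)]  # (token sequence, cumulative log-prob)
    for _ in range(steps):
        candidates = []
        for seq, score in beams:
            for token, logp in enumerate(toy_model(seq)):
                candidates.append((seq + [token], score + logp))
        beams = sorted(candidates, key=lambda b: -b[1])[:beam_size]
    return beams

for seq, score in beam_search():
    print(seq, round(score, 3))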
'''simple docstring''' from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers a = [ "python", "tqdm", "regex", "requests", "packaging", "filelock", "numpy", "tokenizers", "huggingface-hub", "safetensors", "accelerate", "pyyaml", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]: '''simple docstring''' require_version(deps[pkg] , __UpperCAmelCase )
13
1
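require_version_core compares an installed package version against a requirement string at import time. A small sketch of the same check using importlib.metadata and the packaging library (assuming Python 3.8+; this is a simplified stand-in, not the transformers helper itself):

# Minimal runtime version check, sketching what require_version does:
# read the installed version and compare it against a specifier.
from importlib.metadata import version
from packaging.specifiers import SpecifierSet

def require(pkg: str, spec: str) -> None:
    installed = version(pkg)
    if installed not in SpecifierSet(spec):
        raise ImportError(f"{pkg}{spec} is required, found {pkg}=={installed}")

require("packaging", ">=20.0")  # passes on any reasonably recent environment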
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer a = ["bert-base-uncased", "bert-base-cased"] a = "hf-internal-testing/tiny-bert-tf-only" if is_tf_available(): class __a ( tf.keras.Model ): def __init__( self : Union[str, Any] ,lowerCamelCase : Tuple ): '''simple docstring''' super().__init__() __SCREAMING_SNAKE_CASE = tokenizer __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = TFAutoModel.from_config(lowerCamelCase ) def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.bert(**lowerCamelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' super().setUp() __SCREAMING_SNAKE_CASE = [ BertTokenizer.from_pretrained(lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false __SCREAMING_SNAKE_CASE = [TFBertTokenizer.from_pretrained(lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(lowerCamelCase ,use_fast_bert_tokenizer=lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) __SCREAMING_SNAKE_CASE = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] __SCREAMING_SNAKE_CASE = list(zip(self.test_sentences ,self.test_sentences[::-1] ) ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): __SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ,return_tensors="""tf""" ,padding="""longest""" ) __SCREAMING_SNAKE_CASE = tf_tokenizer(lowerCamelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] ,tf.intaa ) == tf_outputs[key] ) ) @slow def UpperCAmelCase__ ( self : int ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: __SCREAMING_SNAKE_CASE = tf_tokenizer(self.paired_sentences ) __SCREAMING_SNAKE_CASE = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] ,text_pair=[sentence[1] for sentence in self.paired_sentences] ,) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] ,tf.intaa ) == separated_outputs[key] ) ) @slow def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: __SCREAMING_SNAKE_CASE = tf.function(lowerCamelCase ) for test_inputs in (self.test_sentences, self.paired_sentences): __SCREAMING_SNAKE_CASE = 
tf.constant(lowerCamelCase ) __SCREAMING_SNAKE_CASE = compiled_tokenizer(lowerCamelCase ) __SCREAMING_SNAKE_CASE = tf_tokenizer(lowerCamelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: __SCREAMING_SNAKE_CASE = ModelToSave(tokenizer=lowerCamelCase ) __SCREAMING_SNAKE_CASE = tf.convert_to_tensor(self.test_sentences ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """saved.model""" model.save(lowerCamelCase ) __SCREAMING_SNAKE_CASE = tf.keras.models.load_model(lowerCamelCase ) __SCREAMING_SNAKE_CASE = loaded_model(lowerCamelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) ,1E-5 )
13
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") a = logging.getLogger(__name__) @dataclass class __a : __UpperCamelCase : Optional[str] = field( default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) __UpperCamelCase : Optional[str] = field( default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, ) __UpperCamelCase : int = field( default=1024, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} ) __UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __a : __UpperCamelCase : str = field( default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, ) __UpperCamelCase : str = field( default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) }, ) def __magic_name__ ( ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __SCREAMING_SNAKE_CASE = training_args.get_process_log_level() logger.setLevel(__UpperCAmelCase ) datasets.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1] __SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __SCREAMING_SNAKE_CASE = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __SCREAMING_SNAKE_CASE = False # Some models have set the order of the labels to use, so let's make sure we do use it. __SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1} __SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__UpperCAmelCase ): # Tokenize the texts def _convert_table_text_to_pandas(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __SCREAMING_SNAKE_CASE = examples["""statement"""] __SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __SCREAMING_SNAKE_CASE = raw_datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""train"""] if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and 
"test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions __SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = default_data_collator elif training_args.fpaa: __SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 ) else: __SCREAMING_SNAKE_CASE = None # Initialize our Trainer __SCREAMING_SNAKE_CASE = Trainer( model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE = last_checkpoint __SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = train_result.metrics __SCREAMING_SNAKE_CASE = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __UpperCAmelCase ) trainer.save_metrics("""train""" , __UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.log_metrics("""eval""" , __UpperCAmelCase ) trainer.save_metrics("""eval""" , __UpperCAmelCase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" ) __SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions __SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 ) __SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(__UpperCAmelCase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = label_list[item] writer.write(f"""{index}\t{item}\n""" ) __SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**__UpperCAmelCase ) else: trainer.create_model_card(**__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> Any: '''simple docstring''' main() if __name__ == "__main__": main()
13
1
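compute_metrics in the script above takes the argmax over logits and reports accuracy. A standalone numpy sketch with made-up arrays:

# Accuracy from logits, as in compute_metrics above (made-up arrays).
import numpy as np

def accuracy(logits: np.ndarray, labels: np.ndarray) -> float:
    preds = np.argmax(logits, axis=1)
    return float((preds == labels).astype(np.float64).mean())

logits = np.array([[0.2, 0.8], [0.9, 0.1], [0.4, 0.6]])
labels = np.array([1, 0, 0])
print(accuracy(logits, labels))  # 2 of 3 correct -> ~0.667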
'''simple docstring''' def __magic_name__ ( __UpperCAmelCase ) -> float: '''simple docstring''' if not nums: # Makes sure that the list is not empty raise ValueError("""List is empty""" ) __SCREAMING_SNAKE_CASE = sum(__UpperCAmelCase ) / len(__UpperCAmelCase ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(__UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
13
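A short worked example of the mean absolute deviation computed above:

# Worked example: the average of [1, 3, 5, 7] is 4; the absolute deviations
# are [3, 1, 1, 3]; their mean is 2.
nums = [1, 3, 5, 7]
average = sum(nums) / len(nums)
print(sum(abs(x - average) for x in nums) / len(nums))  # 2.0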
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
13
1
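The module above exposes its pipelines only when the optional torch/transformers dependencies import cleanly. A generic sketch of that guarded-import pattern:

# Generic optional-dependency pattern, sketching the guarded imports above:
# probe for the heavy dependency once, and fail with a clear message on use.
try:
    import torch  # heavy optional dependency
    HAS_TORCH = True
except ImportError:
    HAS_TORCH = False

def run_pipeline():
    if not HAS_TORCH:
        raise ImportError("This feature requires torch; install it with `pip install torch`.")
    return torch.zeros(2, 2)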
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) # TODO Update this a = { "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json", # See all ESM models at https://huggingface.co/models?filter=esm } class __a ( _snake_case ): __UpperCamelCase : Any = 'esm' def __init__( self : Tuple ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : int=None ,lowerCamelCase : int=None ,lowerCamelCase : Optional[Any]=768 ,lowerCamelCase : List[str]=12 ,lowerCamelCase : Optional[int]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : List[str]=1026 ,lowerCamelCase : int=0.02 ,lowerCamelCase : Any=1E-1_2 ,lowerCamelCase : Dict="absolute" ,lowerCamelCase : Dict=True ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : List[Any]=False ,lowerCamelCase : int=False ,lowerCamelCase : int=None ,lowerCamelCase : Union[str, Any]=None ,**lowerCamelCase : List[Any] ,): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase ,mask_token_id=lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = emb_layer_norm_before __SCREAMING_SNAKE_CASE = token_dropout __SCREAMING_SNAKE_CASE = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("""No esmfold_config supplied for folding model, using default values.""" ) __SCREAMING_SNAKE_CASE = EsmFoldConfig() elif isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = EsmFoldConfig(**lowerCamelCase ) __SCREAMING_SNAKE_CASE = esmfold_config if vocab_list is None: logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" ) __SCREAMING_SNAKE_CASE = get_default_vocab_list() else: __SCREAMING_SNAKE_CASE = vocab_list else: __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.esmfold_config is not None and getattr(self.esmfold_config ,"""use_esm_attn_map""" ,lowerCamelCase ): raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = super().to_dict() if isinstance(self.esmfold_config ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = self.esmfold_config.to_dict() return output @dataclass class __a : __UpperCamelCase : str = None __UpperCamelCase : bool = True __UpperCamelCase : bool = False __UpperCamelCase : bool = False __UpperCamelCase : bool = False __UpperCamelCase : float = 0 __UpperCamelCase : bool = True __UpperCamelCase : bool = False __UpperCamelCase : int = 128 __UpperCamelCase : "TrunkConfig" = None def UpperCAmelCase__ ( self : str ): '''simple docstring''' if self.trunk is None: __SCREAMING_SNAKE_CASE = TrunkConfig() elif isinstance(self.trunk ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = TrunkConfig(**self.trunk ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple 
docstring''' __SCREAMING_SNAKE_CASE = asdict(self ) __SCREAMING_SNAKE_CASE = self.trunk.to_dict() return output @dataclass class __a : __UpperCamelCase : int = 48 __UpperCamelCase : int = 1024 __UpperCamelCase : int = 128 __UpperCamelCase : int = 32 __UpperCamelCase : int = 32 __UpperCamelCase : int = 32 __UpperCamelCase : float = 0 __UpperCamelCase : float = 0 __UpperCamelCase : bool = False __UpperCamelCase : int = 4 __UpperCamelCase : Optional[int] = 128 __UpperCamelCase : "StructureModuleConfig" = None def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' if self.structure_module is None: __SCREAMING_SNAKE_CASE = StructureModuleConfig() elif isinstance(self.structure_module ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( """`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got""" f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( """`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got""" f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) __SCREAMING_SNAKE_CASE = self.sequence_state_dim // self.sequence_head_width __SCREAMING_SNAKE_CASE = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got""" f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got""" f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = asdict(self ) __SCREAMING_SNAKE_CASE = self.structure_module.to_dict() return output @dataclass class __a : __UpperCamelCase : int = 384 __UpperCamelCase : int = 128 __UpperCamelCase : int = 16 __UpperCamelCase : int = 128 __UpperCamelCase : int = 12 __UpperCamelCase : int = 4 __UpperCamelCase : int = 8 __UpperCamelCase : float = 0.1 __UpperCamelCase : int = 8 __UpperCamelCase : int = 1 __UpperCamelCase : int = 2 __UpperCamelCase : int = 7 __UpperCamelCase : int = 10 __UpperCamelCase : float = 1E-8 __UpperCamelCase : float = 1E5 def UpperCAmelCase__ ( self : int ): '''simple docstring''' return asdict(self ) def __magic_name__ ( ) -> List[Any]: '''simple docstring''' return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
13
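TrunkConfig above validates that each state dimension factors exactly into heads times head width. The same invariant in isolation:

# The heads-times-head-width invariant checked by TrunkConfig, in isolation.
def check_heads(state_dim: int, head_width: int) -> int:
    num_heads = state_dim // head_width
    if num_heads * head_width != state_dim:
        raise ValueError(
            f"state_dim ({state_dim}) must be a multiple of head_width ({head_width})"
        )
    return num_heads

print(check_heads(1024, 32))  # 32 heads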
'''simple docstring''' import requests from bs4 import BeautifulSoup def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase , params=__UpperCAmelCase ).content , """html.parser""" ) __SCREAMING_SNAKE_CASE = soup.find("""div""" , attrs={"""class""": """gs_ri"""} ) __SCREAMING_SNAKE_CASE = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" ) return anchors[2].get_text() if __name__ == "__main__": a = { "title": ( "Precisely geometry controlled microsupercapacitors for ultrahigh areal " "capacitance, volumetric capacitance, and energy density" ), "journal": "Chem. Mater.", "volume": 30, "pages": "3979-3990", "year": 2018, "hl": "en", } print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
13
1
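The scraper above pulls the "Cited by" anchor out of Google Scholar's result markup. The same BeautifulSoup pattern on a static HTML string, so it runs without a network call; the markup is a made-up stand-in for the real page:

# BeautifulSoup extraction on static HTML (no network call); the markup is a
# made-up stand-in for the Google Scholar result block parsed above.
from bs4 import BeautifulSoup

html = """
<div class="gs_ri">
  <div class="gs_fl">
    <a href="#">Save</a><a href="#">Cite</a><a href="#">Cited by 128</a>
  </div>
</div>
"""
soup = BeautifulSoup(html, "html.parser")
anchors = soup.find("div", attrs={"class": "gs_ri"}).find("div", attrs={"class": "gs_fl"}).find_all("a")
print(anchors[2].get_text())  # Cited by 128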
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __a ( unittest.TestCase ): def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224} __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_convert_rgb def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __SCREAMING_SNAKE_CASE = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) ) else: __SCREAMING_SNAKE_CASE = [] for i in range(self.batch_size ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 ) image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs] if torchify: __SCREAMING_SNAKE_CASE = [torch.from_numpy(lowerCamelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase ) @property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return 
self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' pass def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase ) __SCREAMING_SNAKE_CASE = 3 @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
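# --- Added illustrative sketch (not part of the test file above). It shows the
# behaviour those tests assert: ChineseCLIPImageProcessor resizes/center-crops
# any input image to the configured size and returns a batched tensor. The
# 224x224 values mirror the tester defaults; this is a hedged example that
# assumes transformers, torch, numpy and PIL are installed.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor(
    size={"height": 224, "width": 224},
    do_center_crop=True,
    crop_size={"height": 224, "width": 224},
)
image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])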
13
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
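# --- Added illustrative sketch (hedged): instantiating the config above via
# the public transformers API and checking one default. For any task other
# than "multiple-choice", CamembertOnnxConfig.inputs maps both "input_ids"
# and "attention_mask" to the dynamic axes {0: "batch", 1: "sequence"}.
from transformers import CamembertConfig

config = CamembertConfig(num_hidden_layers=6)
print(config.hidden_size)  # 768, the default set in __init__ above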
13
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class __a ( unittest.TestCase ): def __init__( self : str ,lowerCamelCase : List[str] ,lowerCamelCase : str=7 ,lowerCamelCase : Optional[Any]=3 ,lowerCamelCase : Tuple=30 ,lowerCamelCase : int=400 ,lowerCamelCase : Union[str, Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : str=True ,lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] ,lowerCamelCase : Tuple=[0.5, 0.5, 0.5] ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : Any=1 / 255 ,lowerCamelCase : Any=True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_pad def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[Any] ,lowerCamelCase : Tuple=False ): '''simple docstring''' if not batched: __SCREAMING_SNAKE_CASE = image_inputs[0] if isinstance(lowerCamelCase ,Image.Image ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.size else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2] if w < h: __SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w ) __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] elif w > h: __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] __SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h ) else: __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] else: __SCREAMING_SNAKE_CASE = [] for image in image_inputs: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __SCREAMING_SNAKE_CASE = max(lowerCamelCase ,key=lambda lowerCamelCase : item[0] )[0] __SCREAMING_SNAKE_CASE = max(lowerCamelCase ,key=lambda lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Union[str, Any] = DetaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = DetaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_rescale""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_pad""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad ,lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase ,batched=lowerCamelCase ) __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase ,numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase ,batched=lowerCamelCase ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowerCamelCase ,torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,torch.Tensor ) 
# Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase ) self.assertEqual( encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCamelCase ,batched=lowerCamelCase ) self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) ,) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" ,"""r""" ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {"""image_id""": 3_9769, """annotations""": target} # encode them __SCREAMING_SNAKE_CASE = DetaImageProcessor() __SCREAMING_SNAKE_CASE = image_processing(images=lowerCamelCase ,annotations=lowerCamelCase ,return_tensors="""pt""" ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,lowerCamelCase ,atol=1E-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,lowerCamelCase ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,lowerCamelCase ,atol=1E-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,lowerCamelCase ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,lowerCamelCase ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,lowerCamelCase ) ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,lowerCamelCase ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,lowerCamelCase ) ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" ,"""r""" ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} __SCREAMING_SNAKE_CASE = 
pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __SCREAMING_SNAKE_CASE = DetaImageProcessor(format="""coco_panoptic""" ) __SCREAMING_SNAKE_CASE = image_processing(images=lowerCamelCase ,annotations=lowerCamelCase ,masks_path=lowerCamelCase ,return_tensors="""pt""" ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,lowerCamelCase ,atol=1E-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,lowerCamelCase ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,lowerCamelCase ,atol=1E-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,lowerCamelCase ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,lowerCamelCase ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,lowerCamelCase ) ) # verify masks __SCREAMING_SNAKE_CASE = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() ,lowerCamelCase ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,lowerCamelCase ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,lowerCamelCase ) )
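# --- Added worked example of the resize arithmetic checked by
# get_expected_values in the Deta tester above: with
# size={"shortest_edge": 18}, a 30x40 (h x w) image keeps its aspect ratio,
# so the short side becomes 18 and the long side scales to
# int(18 * 40 / 30) = 24. A hedged standalone restatement (function name is
# illustrative, not from the original file):
def expected_resized_shape(height: int, width: int, shortest_edge: int = 18) -> tuple[int, int]:
    # Returns (expected_height, expected_width) after shortest-edge resizing.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


print(expected_resized_shape(30, 40))  # (18, 24)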
13
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __a ( unittest.TestCase ): def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 __SCREAMING_SNAKE_CASE = num_patches + 1 def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = ViTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,) return config, pixel_values def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = (self.image_size, self.image_size) __SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size) __SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.type_sequence_label_size __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 
self.type_sequence_label_size) ) # test greyscale images __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase ) __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FlaxViTModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class(lowerCamelCase ) __SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = model_class(lowerCamelCase ) @jax.jit def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ): return model(pixel_values=lowerCamelCase ,**lowerCamelCase ) with self.subTest("""JIT Enabled""" ): __SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple() self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) ) for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ): self.assertEqual(jitted_output.shape ,output.shape ) @slow def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase )
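# --- Added worked example of the sequence-length arithmetic the FlaxViT
# tester above relies on: a ViT splits the image into
# (image_size // patch_size)**2 patches and prepends one [CLS] token.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1  # +1 for the [CLS] token
print(num_patches, seq_length)  # 225 226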
13
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
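# --- Added usage note (illustrative): with the _LazyModule indirection above,
# `from transformers import FalconConfig` only imports configuration_falcon;
# modeling_falcon (and its torch dependency) is loaded the first time a
# modeling symbol is actually accessed. A minimal, hedged sketch of the same
# pattern using only the stdlib (class and names below are assumptions, not
# the transformers implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Find the submodule that exports `attr` and import it on first use.
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")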
13
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json", # See all SEW models at https://huggingface.co/models?filter=sew } class __a ( _snake_case ): __UpperCamelCase : Tuple = 'sew' def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,): '''simple docstring''' super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ) __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = feat_extract_norm __SCREAMING_SNAKE_CASE = feat_extract_activation __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = list(lowerCamelCase ) __SCREAMING_SNAKE_CASE = conv_bias __SCREAMING_SNAKE_CASE = num_conv_pos_embeddings __SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups __SCREAMING_SNAKE_CASE = len(self.conv_dim ) __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = squeeze_factor __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = feat_proj_dropout __SCREAMING_SNAKE_CASE = final_dropout __SCREAMING_SNAKE_CASE = layerdrop __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 
__SCREAMING_SNAKE_CASE = apply_spec_augment __SCREAMING_SNAKE_CASE = mask_time_prob __SCREAMING_SNAKE_CASE = mask_time_length __SCREAMING_SNAKE_CASE = mask_time_min_masks __SCREAMING_SNAKE_CASE = mask_feature_prob __SCREAMING_SNAKE_CASE = mask_feature_length __SCREAMING_SNAKE_CASE = mask_feature_min_masks # ctc loss __SCREAMING_SNAKE_CASE = ctc_loss_reduction __SCREAMING_SNAKE_CASE = ctc_zero_infinity # sequence classification __SCREAMING_SNAKE_CASE = use_weighted_layer_sum __SCREAMING_SNAKE_CASE = classifier_proj_size @property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return functools.reduce(operator.mul ,self.conv_stride ,1 )
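# --- Added worked example for the final property above (called
# `inputs_to_logits_ratio` in the upstream SEWConfig; the name is obfuscated
# here): the feature extractor's total downsampling is the product of all
# conv strides, computed standalone below.
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one output frame per 320 input audio samples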
13
1
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: __SCREAMING_SNAKE_CASE = s_dict.pop(__UpperCAmelCase ) elif "subsample" in key: __SCREAMING_SNAKE_CASE = s_dict.pop(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = emb.weight.shape __SCREAMING_SNAKE_CASE = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = emb.weight.data return lin_layer def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = torch.load(__UpperCAmelCase , map_location="""cpu""" ) __SCREAMING_SNAKE_CASE = mam_aaa["""args"""] __SCREAMING_SNAKE_CASE = mam_aaa["""model"""] __SCREAMING_SNAKE_CASE = state_dict["""decoder.output_projection.weight"""] remove_ignore_keys_(__UpperCAmelCase ) rename_keys(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = state_dict["""decoder.embed_tokens.weight"""].shape[0] __SCREAMING_SNAKE_CASE = args.share_decoder_input_output_embed __SCREAMING_SNAKE_CASE = [int(__UpperCAmelCase ) for i in args.conv_kernel_sizes.split(""",""" )] __SCREAMING_SNAKE_CASE = SpeechaTextConfig( vocab_size=__UpperCAmelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(__UpperCAmelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=__UpperCAmelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__UpperCAmelCase , num_beams=5 , max_length=200 , use_cache=__UpperCAmelCase , decoder_start_token_id=2 , early_stopping=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = SpeechaTextForConditionalGeneration(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model.model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase ) if len(__UpperCAmelCase ) > 0 and not set(__UpperCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,""" f""" but all the following weights are missing {missing}""" ) if tie_embeds: __SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.decoder.embed_tokens ) else: __SCREAMING_SNAKE_CASE = lm_head_weights 
model.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") a = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
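# --- Added illustrative sketch of the weight-tying trick used by the
# embedding-to-linear helper in the conversion script above: the LM head is a
# bias-free Linear whose weight matrix shares storage with the embedding
# table, so logits = hidden @ E^T. Sizes below are illustrative.
import torch
from torch import nn

emb = nn.Embedding(100, 16)             # (vocab_size, d_model)
lm_head = nn.Linear(16, 100, bias=False)
lm_head.weight.data = emb.weight.data   # share storage, as in the script
hidden = torch.randn(1, 16)
print(lm_head(hidden).shape)            # torch.Size([1, 100]) -> vocab logits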
13
'''simple docstring'''


def solution(numerator: int = 1, digit: int = 1000) -> int:
    '''simple docstring'''
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            # Track the remainders of the long division; a repeated remainder
            # means the decimal expansion has started to recur.
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
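# --- Added worked example: 1/7 = 0.(142857) recurs with period 6, the longest
# cycle of any unit fraction with denominator up to 10, so the search above
# picks 7; for the full Project Euler 26 range (d < 1000) the answer is 983.
assert solution(1, 10) == 7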
13
1
'''simple docstring''' from collections.abc import Generator from math import sin def __magic_name__ ( __UpperCAmelCase ) -> bytes: '''simple docstring''' if len(__UpperCAmelCase ) != 32: raise ValueError("""Input must be of length 32""" ) __SCREAMING_SNAKE_CASE = b"""""" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __magic_name__ ( __UpperCAmelCase ) -> bytes: '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) __SCREAMING_SNAKE_CASE = format(__UpperCAmelCase , """08x""" )[-8:] __SCREAMING_SNAKE_CASE = b"""""" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" ) return little_endian_hex def __magic_name__ ( __UpperCAmelCase ) -> bytes: '''simple docstring''' __SCREAMING_SNAKE_CASE = b"""""" for char in message: bit_string += format(__UpperCAmelCase , """08b""" ).encode("""utf-8""" ) __SCREAMING_SNAKE_CASE = format(len(__UpperCAmelCase ) , """064b""" ).encode("""utf-8""" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__UpperCAmelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __magic_name__ ( __UpperCAmelCase ) -> Generator[list[int], None, None]: '''simple docstring''' if len(__UpperCAmelCase ) % 512 != 0: raise ValueError("""Input must have length that's a multiple of 512""" ) for pos in range(0 , len(__UpperCAmelCase ) , 512 ): __SCREAMING_SNAKE_CASE = bit_string[pos : pos + 512] __SCREAMING_SNAKE_CASE = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) __SCREAMING_SNAKE_CASE = format(__UpperCAmelCase , """032b""" ) __SCREAMING_SNAKE_CASE = """""" for c in i_str: new_str += "1" if c == "0" else "0" return int(__UpperCAmelCase , 2 ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' return (a + b) % 2**32 def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' if i < 0: raise ValueError("""Input must be non-negative""" ) if shift < 0: raise ValueError("""Shift must be non-negative""" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __magic_name__ ( __UpperCAmelCase ) -> bytes: '''simple docstring''' __SCREAMING_SNAKE_CASE = preprocess(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __SCREAMING_SNAKE_CASE = 0X6745_2301 __SCREAMING_SNAKE_CASE = 0XEFCD_AB89 __SCREAMING_SNAKE_CASE = 0X98BA_DCFE __SCREAMING_SNAKE_CASE = 0X1032_5476 __SCREAMING_SNAKE_CASE = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = aa __SCREAMING_SNAKE_CASE = ba __SCREAMING_SNAKE_CASE = ca __SCREAMING_SNAKE_CASE = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __SCREAMING_SNAKE_CASE = d ^ (b & (c ^ d)) __SCREAMING_SNAKE_CASE = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __SCREAMING_SNAKE_CASE = c ^ (d & (b 
^ c)) __SCREAMING_SNAKE_CASE = (5 * i + 1) % 16 elif i <= 47: __SCREAMING_SNAKE_CASE = b ^ c ^ d __SCREAMING_SNAKE_CASE = (3 * i + 5) % 16 else: __SCREAMING_SNAKE_CASE = c ^ (b | not_aa(__UpperCAmelCase )) __SCREAMING_SNAKE_CASE = (7 * i) % 16 __SCREAMING_SNAKE_CASE = (f + a + added_consts[i] + block_words[g]) % 2**32 __SCREAMING_SNAKE_CASE = d __SCREAMING_SNAKE_CASE = c __SCREAMING_SNAKE_CASE = b __SCREAMING_SNAKE_CASE = sum_aa(__UpperCAmelCase , left_rotate_aa(__UpperCAmelCase , shift_amounts[i] ) ) # Add hashed chunk to running total __SCREAMING_SNAKE_CASE = sum_aa(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = sum_aa(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = sum_aa(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = sum_aa(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase ) + reformat_hex(__UpperCAmelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
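# --- Added cross-check sketch for the MD5 implementation above. The top-level
# digest function's name is obfuscated in this dump; in the upstream
# TheAlgorithms source it is `md5_me(message: bytes) -> bytes`, so the name
# below is an assumption. With that name restored, the pure-Python digest
# matches the stdlib byte for byte:
# import hashlib
# message = b"The quick brown fox jumps over the lazy dog"
# assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")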
13
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __a ( unittest.TestCase ): def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224} __SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_center_crop __SCREAMING_SNAKE_CASE = crop_size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_convert_rgb def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __SCREAMING_SNAKE_CASE = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) ) else: __SCREAMING_SNAKE_CASE = [] for i in range(self.batch_size ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 ) image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs] if torchify: __SCREAMING_SNAKE_CASE = [torch.from_numpy(lowerCamelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase ) @property def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' return 
self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' pass def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) @require_torch @require_vision class __a ( _snake_case, unittest.TestCase ): __UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase ) __SCREAMING_SNAKE_CASE = 3 @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) ) self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ) for image in image_inputs: self.assertIsInstance(lowerCamelCase ,Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched __SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
13
1
'''simple docstring'''

import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    '''simple docstring'''
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    '''simple docstring'''
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    '''simple docstring'''
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    '''simple docstring'''
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
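# --- Added usage sketch for the in-memory stage only: compress_data consumes
# a string of '0'/'1' characters and emits the LZ78-style phrase codes; the
# file-level framing (length header, padding) is handled by add_file_length
# and write_file_binary above.
sample_bits = "0100100010"  # any string of '0' and '1' characters
print(compress_data(sample_bits))  # usually shorter than long inputs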
13
'''simple docstring'''

import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    '''simple docstring'''

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    '''simple docstring'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    '''simple docstring'''
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
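# --- Added usage sketch: get_duration turns a function into one that returns
# elapsed seconds instead of its own result, which is how the datasets
# benchmark suite times Arrow writes. The output path below is illustrative.
@get_duration
def bench_write(path):
    features = datasets.Features({"text": datasets.Value("string")})
    generate_example_dataset(path, features, num_examples=10)


# seconds = bench_write("/tmp/bench.arrow")  # returns float seconds, not a Dataset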
13
1
'''simple docstring'''

from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    '''simple docstring'''
    # Flattening of the ellipsoid and reduced latitudes
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
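# --- Added worked example: San Francisco (37.774856, -122.424227) to Yosemite
# Valley (37.864742, -119.537521). At this latitude, ~2.89 degrees of
# longitude span roughly 254 km, so the formula above should return a
# distance in metres of that order.
if __name__ == "__main__":
    print(f"{haversine_distance(37.774856, -122.424227, 37.864742, -119.537521) / 1000:.1f} km")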
13
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a = "__DUMMY_TRANSFORMERS_USER__" a = "Dummy User" a = "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" a = "https://hub-ci.huggingface.co" a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}" a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}" a = Path("~/.huggingface/hub_ci_token").expanduser() @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' HfFolder.save_token(__UpperCAmelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __magic_name__ ( ) -> Optional[Any]: '''simple docstring''' return HfApi(endpoint=__UpperCAmelCase ) @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = HfFolder.get_token() HfFolder.save_token(__UpperCAmelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__UpperCAmelCase ) @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> Dict: '''simple docstring''' def _cleanup_repo(__UpperCAmelCase ): hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' @contextmanager def _temporary_repo(__UpperCAmelCase ): try: yield repo_id finally: cleanup_repo(__UpperCAmelCase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , 
private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}""" __SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase ) hf_api.upload_file( token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
13
1
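The record above builds its CI fixtures from pytest's monkeypatch; the sketch below reduces that pattern to a stand-in namespace so it runs without the datasets library. The endpoint values are illustrative.

import types

import pytest

config = types.SimpleNamespace(HF_ENDPOINT="https://huggingface.co")
CI_ENDPOINT = "https://hub-ci.huggingface.co"

@pytest.fixture
def ci_hub_config(monkeypatch):
    # setattr is undone automatically when the test using this fixture ends
    monkeypatch.setattr(config, "HF_ENDPOINT", CI_ENDPOINT)

def test_endpoint_is_patched(ci_hub_config):
    assert config.HF_ENDPOINT == CI_ENDPOINT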
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __a ( _snake_case ): __UpperCamelCase : Tuple = ['image_processor', 'tokenizer'] __UpperCamelCase : Tuple = 'CLIPImageProcessor' __UpperCamelCase : Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self : List[str] ,lowerCamelCase : int=None ,lowerCamelCase : List[Any]=None ,**lowerCamelCase : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,lowerCamelCase ,) __SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" ) __SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowerCamelCase ,lowerCamelCase ) def __call__( self : int ,lowerCamelCase : List[str]=None ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : List[str]=None ,**lowerCamelCase : int ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=lowerCamelCase ,**lowerCamelCase ) if images is not None: __SCREAMING_SNAKE_CASE = self.image_processor(lowerCamelCase ,return_tensors=lowerCamelCase ,**lowerCamelCase ) if text is not None and images is not None: __SCREAMING_SNAKE_CASE = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCamelCase ) ,tensor_type=lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ,*lowerCamelCase : Tuple ,**lowerCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*lowerCamelCase ,**lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ,*lowerCamelCase : List[Any] ,**lowerCamelCase : List[str] ): '''simple docstring''' return self.tokenizer.decode(*lowerCamelCase ,**lowerCamelCase ) @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names __SCREAMING_SNAKE_CASE = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,lowerCamelCase ,) return self.image_processor_class @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,lowerCamelCase ,) return self.image_processor
13
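A usage sketch for a CLIP-style processor like the one above, assuming the public openai/clip-vit-base-patch32 checkpoint (downloaded on first use): text goes through the tokenizer, images through the image processor, and the pixel values are merged into the tokenizer's BatchEncoding.

from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))  # stand-in for a real photo
batch = processor(text=["a cat", "a dog"], images=image, return_tensors="pt", padding=True)
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']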
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a = logging.get_logger(__name__) a = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : Dict = 'deta' __UpperCamelCase : List[str] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,): '''simple docstring''' if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) __SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] ) else: if isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" ) __SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type] __SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase ) __SCREAMING_SNAKE_CASE = backbone_config __SCREAMING_SNAKE_CASE = num_queries __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = init_xavier_std __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = auxiliary_loss __SCREAMING_SNAKE_CASE = position_embedding_type # deformable attributes __SCREAMING_SNAKE_CASE = num_feature_levels __SCREAMING_SNAKE_CASE = encoder_n_points __SCREAMING_SNAKE_CASE = decoder_n_points __SCREAMING_SNAKE_CASE = two_stage __SCREAMING_SNAKE_CASE = two_stage_num_proposals __SCREAMING_SNAKE_CASE = with_box_refine __SCREAMING_SNAKE_CASE = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __SCREAMING_SNAKE_CASE = class_cost __SCREAMING_SNAKE_CASE = bbox_cost __SCREAMING_SNAKE_CASE = giou_cost # Loss coefficients __SCREAMING_SNAKE_CASE = mask_loss_coefficient 
__SCREAMING_SNAKE_CASE = dice_loss_coefficient __SCREAMING_SNAKE_CASE = bbox_loss_coefficient __SCREAMING_SNAKE_CASE = giou_loss_coefficient __SCREAMING_SNAKE_CASE = eos_coefficient __SCREAMING_SNAKE_CASE = focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase ) @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return self.encoder_attention_heads @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self.d_model def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE = self.backbone_config.to_dict() __SCREAMING_SNAKE_CASE = self.__class__.model_type return output
13
1
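A minimal instantiation sketch for the config class above, assuming a transformers release that ships DETA; the explicit backbone override mirrors the default one built in __init__.

from transformers import DetaConfig, ResNetConfig

backbone = ResNetConfig(out_features=["stage2", "stage3", "stage4"])
config = DetaConfig(backbone_config=backbone, two_stage=True, with_box_refine=True)
# num_attention_heads is an attribute_map alias for encoder_attention_heads
print(config.model_type, config.num_attention_heads)  # deta 8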
'''simple docstring''' import json import os import pickle import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers import is_faiss_available from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bart.tokenization_bart import BartTokenizer from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch if is_faiss_available(): import faiss @require_faiss class __a ( _snake_case ): def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = 8 # DPR tok __SCREAMING_SNAKE_CASE = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) os.makedirs(lowerCamelCase ,exist_ok=lowerCamelCase ) __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok __SCREAMING_SNAKE_CASE = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase ,range(len(lowerCamelCase ) ) ) ) __SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""} __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) os.makedirs(lowerCamelCase ,exist_ok=lowerCamelCase ) __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,BART_VOCAB_FILES_NAMES["""vocab_file"""] ) __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCamelCase ) ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) ) def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase__ ( self : List[str] ): '''simple 
docstring''' __SCREAMING_SNAKE_CASE = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )], } ) dataset.add_faiss_index("""embeddings""" ,string_factory="""Flat""" ,metric_type=faiss.METRIC_INNER_PRODUCT ) return dataset def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.get_dummy_dataset() __SCREAMING_SNAKE_CASE = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,) with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: __SCREAMING_SNAKE_CASE = dataset __SCREAMING_SNAKE_CASE = RagRetriever( lowerCamelCase ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,) return retriever def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : bool ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.get_dummy_dataset() __SCREAMING_SNAKE_CASE = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="""custom""" ,) if from_disk: __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""dataset""" ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""index.faiss""" ) dataset.get_index("""embeddings""" ).save(os.path.join(self.tmpdirname ,"""index.faiss""" ) ) dataset.drop_index("""embeddings""" ) dataset.save_to_disk(os.path.join(self.tmpdirname ,"""dataset""" ) ) del dataset __SCREAMING_SNAKE_CASE = RagRetriever( lowerCamelCase ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,) else: __SCREAMING_SNAKE_CASE = RagRetriever( lowerCamelCase ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ,index=CustomHFIndex(config.retrieval_vector_size ,lowerCamelCase ) ,) return retriever def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = Dataset.from_dict( { """id""": ["""0""", """1"""], """text""": ["""foo""", """bar"""], """title""": ["""Foo""", """Bar"""], """embeddings""": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )], } ) dataset.add_faiss_index("""embeddings""" ,string_factory="""Flat""" ,metric_type=faiss.METRIC_INNER_PRODUCT ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""hf_bert_base.hnswSQ8_correct_phi_128.c_index""" ) dataset.save_faiss_index("""embeddings""" ,index_file_name + """.index.dpr""" ) pickle.dump(dataset["""id"""] ,open(index_file_name + """.index_meta.dpr""" ,"""wb""" ) ) __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,"""psgs_w100.tsv.pkl""" ) __SCREAMING_SNAKE_CASE = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset} pickle.dump(lowerCamelCase ,open(lowerCamelCase ,"""wb""" ) ) __SCREAMING_SNAKE_CASE = RagConfig( retrieval_vector_size=self.retrieval_vector_size ,question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ,index_name="""legacy""" ,index_path=self.tmpdirname ,) __SCREAMING_SNAKE_CASE = RagRetriever( lowerCamelCase ,question_encoder_tokenizer=self.get_dpr_tokenizer() ,generator_tokenizer=self.get_bart_tokenizer() ) return retriever def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 
self.get_dummy_canonical_hf_index_retriever() __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase ,n_docs=lowerCamelCase ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCamelCase ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCamelCase ) self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.get_dummy_canonical_hf_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset: __SCREAMING_SNAKE_CASE = self.get_dummy_dataset() retriever.save_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(lowerCamelCase ) self.assertIsInstance(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase ,n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase ,n_docs=lowerCamelCase ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCamelCase ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCamelCase ) self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(lowerCamelCase ) self.assertIsInstance(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase ,n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE 
, __SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase ,n_docs=lowerCamelCase ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCamelCase ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["""embeddings""", """id""", """text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""id"""] ) ,lowerCamelCase ) self.assertEqual(doc_dicts[0]["""id"""][0] ,"""1""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""id"""][0] ,"""0""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(lowerCamelCase ) self.assertIsInstance(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase ,n_docs=1 ) self.assertTrue(out is not None ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = self.get_dummy_legacy_index_retriever() __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase ,n_docs=lowerCamelCase ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertEqual(len(lowerCamelCase ) ,2 ) self.assertEqual(sorted(doc_dicts[0] ) ,["""text""", """title"""] ) self.assertEqual(len(doc_dicts[0]["""text"""] ) ,lowerCamelCase ) self.assertEqual(doc_dicts[0]["""text"""][0] ,"""bar""" ) # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["""text"""][0] ,"""foo""" ) # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist() ,[[1], [0]] ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.get_dummy_legacy_index_retriever() with tempfile.TemporaryDirectory() as tmp_dirname: retriever.save_pretrained(lowerCamelCase ) __SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(lowerCamelCase ) self.assertIsInstance(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE = retriever.retrieve(lowerCamelCase ,n_docs=1 ) self.assertTrue(out is not None ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase__ ( self : int ): '''simple docstring''' import torch __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = self.get_dummy_canonical_hf_index_retriever() __SCREAMING_SNAKE_CASE = [[5, 7], [10, 11]] __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE = retriever(lowerCamelCase ,lowerCamelCase ,prefix=retriever.config.generator.prefix ,n_docs=lowerCamelCase ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, 
self.retrieval_vector_size) ) self.assertIsInstance(lowerCamelCase ,lowerCamelCase ) self.assertIsInstance(lowerCamelCase ,lowerCamelCase ) self.assertIsInstance(lowerCamelCase ,np.ndarray ) __SCREAMING_SNAKE_CASE = retriever( lowerCamelCase ,lowerCamelCase ,prefix=retriever.config.generator.prefix ,n_docs=lowerCamelCase ,return_tensors="""pt""" ,) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( # noqa: F841 out["""context_input_ids"""], out["""context_attention_mask"""], out["""retrieved_doc_embeds"""], out["""doc_ids"""], ) self.assertEqual(retrieved_doc_embeds.shape ,(2, n_docs, self.retrieval_vector_size) ) self.assertIsInstance(lowerCamelCase ,torch.Tensor ) self.assertIsInstance(lowerCamelCase ,torch.Tensor ) self.assertIsInstance(lowerCamelCase ,torch.Tensor ) @require_torch @require_tokenizers @require_sentencepiece def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.get_dpr_ctx_encoder_tokenizer() __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = self.get_dummy_custom_hf_index_retriever(from_disk=lowerCamelCase ) retriever.set_ctx_encoder_tokenizer(lowerCamelCase ) __SCREAMING_SNAKE_CASE = [[5, 7], [10, 11]] __SCREAMING_SNAKE_CASE = np.array( [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] ,dtype=np.floataa ) __SCREAMING_SNAKE_CASE = retriever(lowerCamelCase ,lowerCamelCase ,prefix=retriever.config.generator.prefix ,n_docs=lowerCamelCase ) self.assertEqual( len(lowerCamelCase ) ,6 ) # check whether the retriever output consist of 6 attributes including tokenized docs self.assertEqual( all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) ,lowerCamelCase ) # check for doc token related keys in dictionary.
13
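The tests above revolve around one retrieval pattern: a datasets.Dataset with a FAISS inner-product index over an "embeddings" column. A self-contained sketch of just that pattern (requires faiss-cpu; the toy vectors are illustrative):

import faiss
import numpy as np
from datasets import Dataset

dim = 8
ds = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "text": ["foo", "bar"],
        "embeddings": [np.ones(dim), 2 * np.ones(dim)],
    }
)
ds.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
scores, examples = ds.get_nearest_examples("embeddings", np.ones(dim, dtype=np.float32), k=1)
print(examples["id"])  # ['1'] -- the all-twos row maximises the inner product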
'''simple docstring''' import flax.linen as nn import jax import jax.numpy as jnp class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : List[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape __SCREAMING_SNAKE_CASE = jax.image.resize( lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,) __SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = nn.Conv( self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) def __call__( self : List[str] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase ) return hidden_states class __a ( nn.Module ): __UpperCamelCase : int __UpperCamelCase : int = None __UpperCamelCase : float = 0.0 __UpperCamelCase : bool = None __UpperCamelCase : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels __SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype ) __SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 ) __SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob ) __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,) __SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __SCREAMING_SNAKE_CASE = None if use_nin_shortcut: __SCREAMING_SNAKE_CASE = nn.Conv( lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,) def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ): '''simple docstring''' __SCREAMING_SNAKE_CASE = hidden_states __SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) ) __SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 ) __SCREAMING_SNAKE_CASE = hidden_states + temb __SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase ) __SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase ) if self.conv_shortcut is not None: __SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase ) return hidden_states + residual
13
1
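A sketch of initialising and applying an upsampling block like the first module above; shapes are NHWC as in the diffusers Flax blocks, and the channel count is an illustrative assumption.

import flax.linen as nn
import jax
import jax.numpy as jnp

class Upsample2D(nn.Module):
    out_channels: int

    @nn.compact
    def __call__(self, x):
        b, h, w, c = x.shape
        # nearest-neighbour 2x resize, then a 3x3 same-padding convolution
        x = jax.image.resize(x, (b, h * 2, w * 2, c), method="nearest")
        return nn.Conv(self.out_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)))(x)

x = jnp.ones((1, 8, 8, 4))
module = Upsample2D(out_channels=4)
params = module.init(jax.random.PRNGKey(0), x)
print(module.apply(params, x).shape)  # (1, 16, 16, 4)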
'''Compute pi to `precision` significant digits with the Chudnovsky algorithm.'''
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")
    getcontext().prec = precision  # Decimal working precision
    num_iterations = ceil(precision / 14)  # each series term adds ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
13
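A usage sketch for the Chudnovsky routine above (assumes pi() from that record is importable); the first dozen digits fit in a double, so math.pi is a convenient cross-check.

import math

digits = pi(15)
assert digits.startswith(str(math.pi)[:12])
print(digits)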
'''simple docstring''' import sys from collections import defaultdict class __a : def __init__( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ): '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pos def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __SCREAMING_SNAKE_CASE = 2 * start + 1 else: __SCREAMING_SNAKE_CASE = 2 * start + 2 if heap[smallest_child] < heap[start]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( heap[start], positions[start], ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa __SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] ,self.get_position(positions[start] ) ) self.set_position(positions[start] ,lowerCamelCase ) self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = position[index] while index != 0: __SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __SCREAMING_SNAKE_CASE = heap[parent] __SCREAMING_SNAKE_CASE = position[parent] self.set_position(position[parent] ,lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,lowerCamelCase ) break __SCREAMING_SNAKE_CASE = parent else: __SCREAMING_SNAKE_CASE = val __SCREAMING_SNAKE_CASE = temp self.set_position(lowerCamelCase ,0 ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1 for i in range(lowerCamelCase ,-1 ,-1 ): self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = positions[0] __SCREAMING_SNAKE_CASE = sys.maxsize self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase ) return temp def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' __SCREAMING_SNAKE_CASE = Heap() __SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex __SCREAMING_SNAKE_CASE = [] for vertex in range(len(__UpperCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(__UpperCAmelCase ) heap.node_position.append(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = sys.maxsize for neighbor, distance in adjacency_list[0]: __SCREAMING_SNAKE_CASE = 0 
__SCREAMING_SNAKE_CASE = distance heap.heapify(__UpperCAmelCase , __UpperCAmelCase ) for _ in range(1 , len(__UpperCAmelCase ) ): __SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __SCREAMING_SNAKE_CASE = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__UpperCAmelCase )] ): __SCREAMING_SNAKE_CASE = distance heap.bottom_to_top( __UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > a = int(input("Enter number of edges: ").strip()) a = defaultdict(list) for _ in range(edges_number): a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
13
1
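A compact cross-check for the Prim's implementation above, using the standard library's heapq instead of the hand-rolled heap; the 4-vertex graph is an illustrative assumption.

import heapq
from collections import defaultdict

def prim_mst_edges(adj, start=0):
    visited = {start}
    frontier = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(frontier)
    mst = []
    while frontier and len(visited) < len(adj):
        w, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry: v was already reached by a cheaper edge
        visited.add(v)
        mst.append((u, v))
        for nxt, wt in adj[v]:
            if nxt not in visited:
                heapq.heappush(frontier, (wt, v, nxt))
    return mst

adj = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
    adj[u].append((v, w))
    adj[v].append((u, w))
print(prim_mst_edges(adj))  # [(0, 1), (1, 2), (2, 3)]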
'''simple docstring''' import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def __magic_name__ ( ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=__UpperCAmelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=__UpperCAmelCase , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=__UpperCAmelCase ) return parser.parse_args() def __magic_name__ ( ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = parse_args() # Import training_script as a module. __SCREAMING_SNAKE_CASE = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) __SCREAMING_SNAKE_CASE = script_fpath.stem __SCREAMING_SNAKE_CASE = importlib.import_module(__UpperCAmelCase ) # Patch sys.argv __SCREAMING_SNAKE_CASE = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
13
'''simple docstring''' import os import string import sys a = 1 << 8 a = { "tab": ord("\t"), "newline": ord("\r"), "esc": 27, "up": 65 + ARROW_KEY_FLAG, "down": 66 + ARROW_KEY_FLAG, "right": 67 + ARROW_KEY_FLAG, "left": 68 + ARROW_KEY_FLAG, "mod_int": 91, "undefined": sys.maxsize, "interrupt": 3, "insert": 50, "delete": 51, "pg_up": 53, "pg_down": 54, } a = KEYMAP["up"] a = KEYMAP["left"] if sys.platform == "win32": a = [] a = { b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG, b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG, b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG, b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG, b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG, b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG, b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG, b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG, } for i in range(10): a = ord(str(i)) def __magic_name__ ( ) -> Union[str, Any]: '''simple docstring''' if os.name == "nt": import msvcrt __SCREAMING_SNAKE_CASE = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(__UpperCAmelCase ) == 0: # Read the keystroke __SCREAMING_SNAKE_CASE = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): __SCREAMING_SNAKE_CASE = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: __SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(__UpperCAmelCase ) if ord(__UpperCAmelCase ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) __SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] ) except KeyError: __SCREAMING_SNAKE_CASE = cha[1] else: __SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase ) else: __SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty __SCREAMING_SNAKE_CASE = sys.stdin.fileno() __SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase ) try: tty.setraw(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = sys.stdin.read(1 ) finally: termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase ) return ch def __magic_name__ ( ) -> List[str]: '''simple docstring''' __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(__UpperCAmelCase ) == KEYMAP["esc"]: __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]: __SCREAMING_SNAKE_CASE = get_raw_chars() if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
13
1
'''Probabilistic Miller-Rabin primality test for large n.'''
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd
    # (integer division keeps d an int, as bin_exp_mod requires)
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
13
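A cross-check sketch for the Miller-Rabin record above (assumes is_prime_big is importable from it); naive trial division is the ground truth for small n, and 10 witness rounds keep the loop fast.

def is_prime_naive(n: int) -> bool:
    if n < 2:
        return False
    return all(n % i for i in range(2, int(n**0.5) + 1))

for n in range(2, 500):
    assert is_prime_big(n, 10) == is_prime_naive(n), n
print("Miller-Rabin agrees with trial division below 500")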
'''simple docstring''' from __future__ import annotations import bisect def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' if hi < 0: __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) while lo < hi: __SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __SCREAMING_SNAKE_CASE = mid + 1 else: __SCREAMING_SNAKE_CASE = mid return lo def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' if hi < 0: __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) while lo < hi: __SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __SCREAMING_SNAKE_CASE = mid + 1 else: __SCREAMING_SNAKE_CASE = mid return lo def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None: '''simple docstring''' sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1 while left <= right: __SCREAMING_SNAKE_CASE = left + (right - left) // 2 __SCREAMING_SNAKE_CASE = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __SCREAMING_SNAKE_CASE = midpoint - 1 else: __SCREAMING_SNAKE_CASE = midpoint + 1 return None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' __SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase ) if index != len(__UpperCAmelCase ) and sorted_collection[index] == item: return index return None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None: '''simple docstring''' if right < left: return None __SCREAMING_SNAKE_CASE = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 ) else: return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase ) if __name__ == "__main__": a = input("Enter numbers separated by comma:\n").strip() a = sorted(int(item) for item in user_input.split(",")) a = int(input("Enter a single number to be found in the list:\n")) a = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
13
1
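The hand-written helpers above mirror the standard library; a quick usage sketch against bisect itself:

import bisect

data = [0, 5, 7, 10, 15]
assert bisect.bisect_left(data, 7) == 2   # leftmost insertion point for 7
assert bisect.bisect_right(data, 7) == 3  # rightmost insertion point for 7
bisect.insort_left(data, 6)
print(data)  # [0, 5, 6, 7, 10, 15]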
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class __a ( _snake_case ): __UpperCamelCase : Optional[int] = 'microsoft/speecht5_tts' __UpperCamelCase : List[str] = ( 'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the ' 'text to read (in English) and returns a waveform object containing the sound.' ) __UpperCamelCase : List[str] = 'text_reader' __UpperCamelCase : List[str] = SpeechTaProcessor __UpperCamelCase : Union[str, Any] = SpeechTaForTextToSpeech __UpperCamelCase : Any = SpeechTaHifiGan __UpperCamelCase : List[str] = ['text'] __UpperCamelCase : Any = ['audio'] def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' if self.post_processor is None: __SCREAMING_SNAKE_CASE = """microsoft/speecht5_hifigan""" super().setup() def UpperCAmelCase__ ( self : Any ,lowerCamelCase : str ,lowerCamelCase : List[str]=None ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.pre_processor(text=lowerCamelCase ,return_tensors="""pt""" ,truncation=lowerCamelCase ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" ) __SCREAMING_SNAKE_CASE = load_dataset("""Matthijs/cmu-arctic-xvectors""" ,split="""validation""" ) __SCREAMING_SNAKE_CASE = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ): '''simple docstring''' with torch.no_grad(): return self.model.generate_speech(**lowerCamelCase ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Optional[Any] ): '''simple docstring''' with torch.no_grad(): return self.post_processor(lowerCamelCase ).cpu().detach()
13
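A standalone sketch of the text-to-speech pipeline the tool above wraps, assuming the public microsoft/speecht5_tts and microsoft/speecht5_hifigan checkpoints plus the Matthijs/cmu-arctic-xvectors speaker embeddings (all downloaded on first use):

import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello world", return_tensors="pt")
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker = torch.tensor(embeddings[7305]["xvector"]).unsqueeze(0)
with torch.no_grad():
    speech = model.generate_speech(inputs["input_ids"], speaker, vocoder=vocoder)
print(speech.shape)  # a 1-D waveform tensor at 16 kHz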
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging a = logging.get_logger(__name__) class __a ( _snake_case ): __UpperCamelCase : int = 'linear' __UpperCamelCase : Tuple = 'cosine' __UpperCamelCase : Tuple = 'cosine_with_restarts' __UpperCamelCase : List[Any] = 'polynomial' __UpperCamelCase : Optional[Any] = 'constant' __UpperCamelCase : Optional[int] = 'constant_with_warmup' __UpperCamelCase : List[Any] = 'piecewise_constant' def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) ) return 1.0 return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" ) __SCREAMING_SNAKE_CASE = int(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = float(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = value __SCREAMING_SNAKE_CASE = float(rule_list[-1] ) def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ): def rule_func(__UpperCAmelCase ) -> float: __SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(__UpperCAmelCase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func __SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple: '''simple docstring''' def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) 
) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) ) return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" ) def lr_lambda(__UpperCAmelCase ): if current_step < num_warmup_steps: return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: __SCREAMING_SNAKE_CASE = lr_init - lr_end __SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps __SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps __SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) a = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , ) return schedule_func( __UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
13
1
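The linear schedule above reduces to a single LambdaLR; the sketch below prints the multiplier-scaled learning rate over a short run (the optimizer and step counts are arbitrary).

import torch
from torch.optim.lr_scheduler import LambdaLR

opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
num_warmup, num_training = 3, 10

def lr_lambda(step):
    # ramp up for num_warmup steps, then decay linearly to zero
    if step < num_warmup:
        return step / max(1, num_warmup)
    return max(0.0, (num_training - step) / max(1, num_training - num_warmup))

sched = LambdaLR(opt, lr_lambda)
for _ in range(num_training):
    opt.step()
    sched.step()
    print(round(sched.get_last_lr()[0], 6))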
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging a = logging.get_logger(__name__) a = { "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __a ( _snake_case ): __UpperCamelCase : List[Any] = 'gpt_neo' __UpperCamelCase : Tuple = ['past_key_values'] __UpperCamelCase : Optional[int] = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : List[Any] ,lowerCamelCase : Optional[Any]=5_0257 ,lowerCamelCase : List[str]=2048 ,lowerCamelCase : Optional[int]=2048 ,lowerCamelCase : Optional[Any]=24 ,lowerCamelCase : str=[[["global", "local"], 12]] ,lowerCamelCase : int=16 ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Optional[Any]=256 ,lowerCamelCase : Union[str, Any]="gelu_new" ,lowerCamelCase : List[str]=0.0 ,lowerCamelCase : List[Any]=0.0 ,lowerCamelCase : int=0.0 ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=1E-5 ,lowerCamelCase : Optional[int]=0.02 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Dict=5_0256 ,lowerCamelCase : Tuple=5_0256 ,**lowerCamelCase : Optional[Any] ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_layers __SCREAMING_SNAKE_CASE = num_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = window_size __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = resid_dropout __SCREAMING_SNAKE_CASE = embed_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = classifier_dropout __SCREAMING_SNAKE_CASE = layer_norm_epsilon __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = attention_types __SCREAMING_SNAKE_CASE = self.expand_attention_types_params(lowerCamelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.attention_layers)` == `config.num_layers` """ f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """ f"""`config.num_layers = {self.num_layers}`. """ """`config.attention_layers` is prepared using `config.attention_types`. 
""" """Please verify the value of `config.attention_types` argument.""" ) super().__init__(bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase ) @staticmethod def UpperCAmelCase__ ( lowerCamelCase : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' import torch __SCREAMING_SNAKE_CASE = input.size() __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = shape[dimension] __SCREAMING_SNAKE_CASE = torch.arange(0 , __UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = torch.div(sizedim - size , __UpperCAmelCase , rounding_mode="""floor""" ) + 1 __SCREAMING_SNAKE_CASE = torch.arange(__UpperCAmelCase ) + low_indices[:min_length][:, None] __SCREAMING_SNAKE_CASE = [slice(__UpperCAmelCase )] * rank __SCREAMING_SNAKE_CASE = indices __SCREAMING_SNAKE_CASE = input[s] __SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' import torch __SCREAMING_SNAKE_CASE = torch.arange(1 , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = torch.remainder(__UpperCAmelCase , __UpperCAmelCase ) __SCREAMING_SNAKE_CASE = remainders == 0 __SCREAMING_SNAKE_CASE = candidates[divisor_indices] __SCREAMING_SNAKE_CASE = torch.max(__UpperCAmelCase ) return largest_divisor, torch.div(__UpperCAmelCase , __UpperCAmelCase , rounding_mode="""floor""" ) class __a ( _snake_case ): @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(lowerCamelCase ,direction="""inputs""" ) __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_sequence + sequence"""} else: __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self._config.num_heads def UpperCAmelCase__ ( self : str ,lowerCamelCase : PreTrainedTokenizer ,lowerCamelCase : int = -1 ,lowerCamelCase : int = -1 ,lowerCamelCase : bool = False ,lowerCamelCase : Optional[TensorType] = None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = super(lowerCamelCase ,self ).generate_dummy_inputs( lowerCamelCase ,batch_size=lowerCamelCase ,seq_length=lowerCamelCase ,is_pair=lowerCamelCase ,framework=lowerCamelCase ) # We need to order the input in the way they appears in the forward() __SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __SCREAMING_SNAKE_CASE = seqlen + 2 __SCREAMING_SNAKE_CASE = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __SCREAMING_SNAKE_CASE = [ (torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(self.num_layers ) ] __SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""] if self.use_past: 
__SCREAMING_SNAKE_CASE = ordered_inputs["""attention_mask"""].dtype __SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowerCamelCase ,lowerCamelCase ,dtype=lowerCamelCase )] ,dim=1 ) return ordered_inputs @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return 13
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
1
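# A minimal re-implementation (a sketch; `expand_attention_types` is a
# hypothetical name) of the static expansion helper in the GPT-Neo config
# above: [[["global", "local"], 12]] unrolls to one attention kind per layer,
# which is why its length must equal `num_layers` (24 by default).
def expand_attention_types(attention_types):
    attentions = []
    for kinds, repeat in attention_types:
        for _ in range(repeat):
            attentions.extend(kinds)
    return attentions


assert expand_attention_types([[["global", "local"], 12]]) == ["global", "local"] * 12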
'''simple docstring'''


def is_ip_va_address_valid(ip_va_address) -> bool:
    '''simple docstring'''
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
13
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    '''simple docstring'''
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
13
1
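# Offline sanity check (a sketch, assuming beautifulsoup4 is installed; the
# HTML snippet and the 189.84 figure are made-up stand-ins for Yahoo's markup)
# of the lookup pattern used by the stock-price scraper above: find the quote
# div by its full CSS class string, then read the nested price span.
from bs4 import BeautifulSoup

html = '<div class="My(6px) Pos(r) smartphone_Mt(6px)"><span>189.84</span></div>'
soup = BeautifulSoup(html, "html.parser")
price = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)").find("span").text
assert price == "189.84"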
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
'''simple docstring'''


def __magic_name__(num) -> bool:
    '''simple docstring'''
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
13
1
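# Cross-check (a minimal sketch) of the arithmetic palindrome test above
# against plain string reversal: the arithmetic version avoids building the
# intermediate string, but the two must agree for every non-negative integer.
def is_palindrome_via_str(num):
    return num >= 0 and str(num) == str(num)[::-1]


assert is_palindrome_via_str(12321)
assert is_palindrome_via_str(0)
assert not is_palindrome_via_str(12345)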
'''simple docstring'''
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    '''simple docstring'''
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    '''simple docstring'''
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        '''simple docstring'''
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
13
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a = list[list[float | int]] def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix: '''simple docstring''' __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(__UpperCAmelCase ): for col in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , __UpperCAmelCase ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , __UpperCAmelCase ): for row in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(__UpperCAmelCase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase ) ] def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]: '''simple docstring''' __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(__UpperCAmelCase ): for col in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase ) def interpolated_func(__UpperCAmelCase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__UpperCAmelCase ) ) return interpolated_func def __magic_name__ ( __UpperCAmelCase ) -> int: '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int: '''simple docstring''' __SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ): x_val += 1 ret += poly(__UpperCAmelCase ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
13
1
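# Tiny illustration (a sketch, using numpy's solver instead of the hand-rolled
# Gaussian elimination above) of the interpolation step in this Project Euler
# 101 solution: fit a quadratic through the first three values of u(x) = x**3.
# It matches those points but first diverges at x = 4 (58 vs 64), which is
# exactly the kind of "first incorrect term" the solution sums up.
import numpy as np

coeffs = np.linalg.solve(
    np.array([[1.0, 1.0, 1.0], [4.0, 2.0, 1.0], [9.0, 3.0, 1.0]]),  # rows: x**2, x, 1 at x = 1, 2, 3
    np.array([1.0, 8.0, 27.0]),  # u(1), u(2), u(3)
)
assert np.allclose(coeffs, [6.0, -11.0, 6.0])  # the fit is 6x^2 - 11x + 6
assert round(6 * 16 - 11 * 4 + 6) == 58  # prediction at x = 4; true u(4) is 64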
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json", # See all ViT models at https://huggingface.co/models?filter=vit } class __a ( _snake_case ): __UpperCamelCase : str = 'vit' def __init__( self : Dict ,lowerCamelCase : Tuple=768 ,lowerCamelCase : Any=12 ,lowerCamelCase : Optional[int]=12 ,lowerCamelCase : List[str]=3072 ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : int=0.0 ,lowerCamelCase : Optional[int]=0.02 ,lowerCamelCase : int=1E-1_2 ,lowerCamelCase : Optional[Any]=224 ,lowerCamelCase : int=16 ,lowerCamelCase : Optional[int]=3 ,lowerCamelCase : Tuple=True ,lowerCamelCase : List[Any]=16 ,**lowerCamelCase : Any ,): '''simple docstring''' super().__init__(**lowerCamelCase ) __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = qkv_bias __SCREAMING_SNAKE_CASE = encoder_stride class __a ( _snake_case ): __UpperCamelCase : Union[str, Any] = version.parse('1.11' ) @property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return 1E-4
13
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a = logging.get_logger(__name__) @add_end_docstrings(_snake_case ) class __a ( _snake_case ): def __init__( self : Union[str, Any] ,**lowerCamelCase : str ): '''simple docstring''' super().__init__(**lowerCamelCase ) requires_backends(self ,"""vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ): '''simple docstring''' return super().__call__(lowerCamelCase ,**lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} if "candidate_labels" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: __SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ): '''simple docstring''' __SCREAMING_SNAKE_CASE = load_image(lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = candidate_labels __SCREAMING_SNAKE_CASE = [hypothesis_template.format(lowerCamelCase ) for x in candidate_labels] __SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase ) __SCREAMING_SNAKE_CASE = [text_inputs] return inputs def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" ) __SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = text_inputs[0] else: # Batching case. 
__SCREAMING_SNAKE_CASE = text_inputs[0][0] __SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase ) __SCREAMING_SNAKE_CASE = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ): '''simple docstring''' __SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" ) __SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0] if self.framework == "pt": __SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 ) __SCREAMING_SNAKE_CASE = probs.tolist() if not isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = [scores] elif self.framework == "tf": __SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 ) __SCREAMING_SNAKE_CASE = probs.numpy().tolist() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) __SCREAMING_SNAKE_CASE = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda lowerCamelCase : -x[0] ) ] return result
13
1
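# Pure-Python mirror (a sketch; `rank_labels` is a hypothetical helper) of the
# post-processing step in the zero-shot image-classification pipeline above:
# softmax the per-image logits, pair each probability with its candidate
# label, and sort best-first.
import math


def rank_labels(logits, candidate_labels):
    exps = [math.exp(l - max(logits)) for l in logits]
    total = sum(exps)
    probs = [e / total for e in exps]
    return sorted(
        ({"score": p, "label": lab} for p, lab in zip(probs, candidate_labels)),
        key=lambda d: -d["score"],
    )


assert rank_labels([2.0, 0.5, 0.1], ["cat", "dog", "car"])[0]["label"] == "cat"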
'''simple docstring''' import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a = logging.get_logger(__name__) a = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} a = { "vocab_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", }, "emoji_file": { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", }, } a = { "abeja/gpt-neox-japanese-2.7b": 2048, } def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: '''simple docstring''' with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = collections.OrderedDict() __SCREAMING_SNAKE_CASE = collections.OrderedDict() __SCREAMING_SNAKE_CASE = collections.OrderedDict() with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: __SCREAMING_SNAKE_CASE = f.readlines() __SCREAMING_SNAKE_CASE = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token] for idx, b in enumerate(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = b __SCREAMING_SNAKE_CASE = idx for wd in b: __SCREAMING_SNAKE_CASE = idx return vocab, raw_vocab, ids_to_tokens, emoji class __a ( _snake_case ): __UpperCamelCase : Tuple = VOCAB_FILES_NAMES __UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] def __init__( self : Optional[int] ,lowerCamelCase : Any ,lowerCamelCase : int ,lowerCamelCase : Union[str, Any]="<|endoftext|>" ,lowerCamelCase : Optional[Any]="<|endoftext|>" ,lowerCamelCase : Any="<|startoftext|>" ,lowerCamelCase : int="<|endoftext|>" ,lowerCamelCase : Dict=False ,**lowerCamelCase : Optional[Any] ,): '''simple docstring''' super().__init__( unk_token=lowerCamelCase ,pad_token=lowerCamelCase ,bos_token=lowerCamelCase ,eos_token=lowerCamelCase ,do_clean_text=lowerCamelCase ,**lowerCamelCase ,) if not os.path.isfile(lowerCamelCase ): raise ValueError( f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) if not os.path.isfile(lowerCamelCase ): raise ValueError( f"""Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google""" """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) __SCREAMING_SNAKE_CASE = do_clean_text __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_vocab_and_emoji(lowerCamelCase ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = SubWordJapaneseTokenizer( vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' return len(self.raw_vocab ) def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' return dict(self.raw_vocab ,**self.added_tokens_encoder ) def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.subword_tokenizer.tokenize(lowerCamelCase ,clean=self.do_clean_text ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Tuple ): '''simple docstring''' return self.vocab.get(lowerCamelCase ,self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Union[str, Any] ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(lowerCamelCase ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase ).strip() return out_string def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : "Conversation" ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase ) + [self.eos_token_id] ) if len(lowerCamelCase ) > self.model_max_length: __SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :] return input_ids def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Optional[str] = None ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 if os.path.isdir(lowerCamelCase ): __SCREAMING_SNAKE_CASE = os.path.join( lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __SCREAMING_SNAKE_CASE = os.path.join( lowerCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] ) else: __SCREAMING_SNAKE_CASE = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""] ) __SCREAMING_SNAKE_CASE = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""] ) with open(lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" """ Please check that the vocabulary is not corrupted!""" ) __SCREAMING_SNAKE_CASE = token_index writer.write(""",""".join(lowerCamelCase ) + """\n""" ) index += 1 with open(lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as writer: json.dump(self.emoji ,lowerCamelCase ) return vocab_file, emoji_file class __a ( _snake_case ): def __init__( self : Dict ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = vocab # same as swe __SCREAMING_SNAKE_CASE = ids_to_tokens # same as bpe __SCREAMING_SNAKE_CASE = emoji __SCREAMING_SNAKE_CASE = np.max([len(lowerCamelCase ) for w in self.vocab.keys()] ) __SCREAMING_SNAKE_CASE = 
re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" ) __SCREAMING_SNAKE_CASE = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" ) __SCREAMING_SNAKE_CASE = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" ) __SCREAMING_SNAKE_CASE = re.compile( r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) __SCREAMING_SNAKE_CASE = re.compile( r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) __SCREAMING_SNAKE_CASE = re.compile( r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" ) __SCREAMING_SNAKE_CASE = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿""" __SCREAMING_SNAKE_CASE = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟""" __SCREAMING_SNAKE_CASE = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} ) def __len__( self : Tuple ): '''simple docstring''' return len(self.ids_to_tokens ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<URL>""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<EMAIL>""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<TEL>""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<DATE>""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<DATE>""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.content_repattera.sub("""<PRICE>""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __SCREAMING_SNAKE_CASE = content.replace("""<BLOCK><BLOCK>""" ,"""<BLOCK>""" ) return content def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any]=False ): '''simple docstring''' __SCREAMING_SNAKE_CASE = text.replace(""" """ ,"""<SP>""" ) __SCREAMING_SNAKE_CASE = text.replace(""" """ ,"""<SP>""" ) __SCREAMING_SNAKE_CASE = text.replace("""\r\n""" ,"""<BR>""" ) __SCREAMING_SNAKE_CASE = text.replace("""\n""" ,"""<BR>""" ) __SCREAMING_SNAKE_CASE = text.replace("""\r""" ,"""<BR>""" ) __SCREAMING_SNAKE_CASE = text.replace("""\t""" ,"""<TAB>""" ) __SCREAMING_SNAKE_CASE = text.replace("""—""" ,"""ー""" ) __SCREAMING_SNAKE_CASE = text.replace("""−""" ,"""ー""" ) for k, v in self.emoji["emoji"].items(): if k in text: __SCREAMING_SNAKE_CASE = text.replace(lowerCamelCase ,lowerCamelCase ) if clean: __SCREAMING_SNAKE_CASE = self.clean_text(lowerCamelCase ) def check_simbol(lowerCamelCase : List[str] ): __SCREAMING_SNAKE_CASE = x.encode() if len(lowerCamelCase ) == 1 and len(lowerCamelCase ) == 2: __SCREAMING_SNAKE_CASE = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xC_2A1 and c <= 0xC_2BF) or (c >= 0xC_780 and c <= 0xC_783) or (c >= 0xC_AB9 and c <= 0xC_BBF) or (c >= 0xC_C80 and c <= 0xC_DA2) ): return True return False def checkuae(lowerCamelCase : Optional[Any] ): __SCREAMING_SNAKE_CASE = x.encode() if len(lowerCamelCase ) == 1 and len(lowerCamelCase ) == 3: __SCREAMING_SNAKE_CASE = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 
0xE28_080 and c <= 0xE2B_07F: return True return False __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = [] while pos < len(lowerCamelCase ): __SCREAMING_SNAKE_CASE = min(len(lowerCamelCase ) ,pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3 __SCREAMING_SNAKE_CASE = [] # (token_id, token, pos) for e in range(lowerCamelCase ,lowerCamelCase ,-1 ): __SCREAMING_SNAKE_CASE = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(lowerCamelCase ) > 2: __SCREAMING_SNAKE_CASE = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(lowerCamelCase ) > 0: # the smallest token_id is adopted __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sorted(lowerCamelCase ,key=lambda lowerCamelCase : x[0] )[0] result.append(lowerCamelCase ) __SCREAMING_SNAKE_CASE = e else: __SCREAMING_SNAKE_CASE = pos + 1 __SCREAMING_SNAKE_CASE = text[pos:end] if check_simbol(lowerCamelCase ): result.append("""<KIGOU>""" ) elif checkuae(lowerCamelCase ): result.append("""<U2000U2BFF>""" ) else: for i in wd.encode("""utf-8""" ): result.append("""<|byte%d|>""" % i ) __SCREAMING_SNAKE_CASE = end return result def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Dict ,lowerCamelCase : str="\n" ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(lowerCamelCase ) > 0: words.append(bytearray(lowerCamelCase ).decode("""utf-8""" ,errors="""replace""" ) ) __SCREAMING_SNAKE_CASE = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["""emoji_inv"""][word] ) elif word == "<SP>": words.append(""" """ ) elif word == "<BR>": words.append(lowerCamelCase ) elif word == "<TAB>": words.append("""\t""" ) elif word == "<BLOCK>": words.append("""▀""" ) elif word == "<KIGOU>": words.append("""ǀ""" ) elif word == "<U2000U2BFF>": words.append("""‖""" ) else: words.append(lowerCamelCase ) if len(lowerCamelCase ) > 0: words.append(bytearray(lowerCamelCase ).decode("""utf-8""" ,errors="""replace""" ) ) __SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase ) return text
13
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def __magic_name__(pkg, hint=None):
    '''simple docstring'''
    require_version(deps[pkg], hint)
13
1
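# Hedged sketch (`encode_oov`/`decode_oov` are hypothetical names) of the byte
# fallback used by the subword tokenizer above: a word outside the vocabulary
# is emitted as one "<|byte%d|>" token per UTF-8 byte, and decoding strips the
# markers and reassembles the bytes, exactly like the word[6:-2] slice above.
def encode_oov(word):
    return ["<|byte%d|>" % b for b in word.encode("utf-8")]


def decode_oov(tokens):
    return bytes(int(t[6:-2]) for t in tokens).decode("utf-8", errors="replace")


assert decode_oov(encode_oov("猫")) == "猫"  # 3 UTF-8 bytes -> 3 byte tokens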
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
13
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") a = logging.getLogger(__name__) @dataclass class __a : __UpperCamelCase : Optional[str] = field( default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) __UpperCamelCase : Optional[str] = field( default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, ) __UpperCamelCase : int = field( default=1024, metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) }, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[int] = field( default=_snake_case, metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) }, ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} ) __UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} ) def UpperCAmelCase__ ( self : int ): '''simple docstring''' if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" ) else: __SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." __SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." 
@dataclass class __a : __UpperCamelCase : str = field( default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __UpperCamelCase : Optional[str] = field( default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, ) __UpperCamelCase : str = field( default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, ) __UpperCamelCase : bool = field( default=_snake_case, metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) }, ) def __magic_name__ ( ) -> str: '''simple docstring''' __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) __SCREAMING_SNAKE_CASE = training_args.get_process_log_level() logger.setLevel(__UpperCAmelCase ) datasets.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. __SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: __SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1] __SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." __SCREAMING_SNAKE_CASE = data_args.test_file else: raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" ) for key in data_files.keys(): logger.info(f"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith(""".csv""" ): # Loading a dataset from local csv files __SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files __SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels __SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names __SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # load tapex tokenizer __SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , ) __SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Padding strategy if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __SCREAMING_SNAKE_CASE = False # Some models have set the order of the labels to use, so let's make sure we do use it. __SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1} __SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length ) def preprocess_tabfact_function(__UpperCAmelCase ): # Tokenize the texts def _convert_table_text_to_pandas(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )] __SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] ) return _table_pd __SCREAMING_SNAKE_CASE = examples["""statement"""] __SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) ) __SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = examples["""label"""] return result with training_args.main_process_first(desc="""dataset map pre-processing""" ): __SCREAMING_SNAKE_CASE = raw_datasets.map( __UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("""--do_train requires a train dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""train"""] if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("""--do_eval requires a validation dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""validation"""] if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and 
"test_matched" not in raw_datasets: raise ValueError("""--do_predict requires a test dataset""" ) __SCREAMING_SNAKE_CASE = raw_datasets["""test"""] if data_args.max_predict_samples is not None: __SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions __SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = default_data_collator elif training_args.fpaa: __SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 ) else: __SCREAMING_SNAKE_CASE = None # Initialize our Trainer __SCREAMING_SNAKE_CASE = Trainer( model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE = last_checkpoint __SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = train_result.metrics __SCREAMING_SNAKE_CASE = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase ) ) __SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , __UpperCAmelCase ) trainer.save_metrics("""train""" , __UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.log_metrics("""eval""" , __UpperCAmelCase ) trainer.save_metrics("""eval""" , __UpperCAmelCase ) if training_args.do_predict: logger.info("""*** Predict ***""" ) # Removing the `label` columns because it contains -1 and Trainer won't like that. 
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" ) __SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions __SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 ) __SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" ) if trainer.is_world_process_zero(): with open(__UpperCAmelCase , """w""" ) as writer: logger.info("""***** Predict Results *****""" ) writer.write("""index\tprediction\n""" ) for index, item in enumerate(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = label_list[item] writer.write(f"""{index}\t{item}\n""" ) __SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""} if training_args.push_to_hub: trainer.push_to_hub(**__UpperCAmelCase ) else: trainer.create_model_card(**__UpperCAmelCase ) def __magic_name__ ( __UpperCAmelCase ) -> Any: '''simple docstring''' main() if __name__ == "__main__": main()
13
1
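# Hedged sketch (`table_text_to_df` is a hypothetical name) of the
# `_convert_table_text_to_pandas` helper in the TabFact script above: a table
# arrives as '#'-separated cells with one row per line, the first line being
# the header.
import pandas as pd


def table_text_to_df(table_text):
    rows = [line.split("#") for line in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])


df = table_text_to_df("name#year\nalice#2019\nbob#2020")
assert list(df.columns) == ["name", "year"] and len(df) == 2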
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version a = get_logger(__name__) class __a : __UpperCamelCase : Tuple = 'dummy_data' __UpperCamelCase : Dict = 'datasets' __UpperCamelCase : int = False def __init__( self : List[str] ,lowerCamelCase : str ,lowerCamelCase : str ,lowerCamelCase : Union[Version, str] ,lowerCamelCase : Optional[str] = None ,lowerCamelCase : bool = False ,lowerCamelCase : bool = True ,lowerCamelCase : Optional[List[Callable]] = None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = dataset_name __SCREAMING_SNAKE_CASE = cache_dir __SCREAMING_SNAKE_CASE = use_local_dummy_data __SCREAMING_SNAKE_CASE = config # download_callbacks take a single url as input __SCREAMING_SNAKE_CASE = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __SCREAMING_SNAKE_CASE = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __SCREAMING_SNAKE_CASE = str(lowerCamelCase ) # to be downloaded __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' if self._dummy_file is None: __SCREAMING_SNAKE_CASE = self.download_dummy_data() return self._dummy_file @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("""dummy""" ,self.config.name ,self.version_name ) # structure is dummy / version_name return os.path.join("""dummy""" ,self.version_name ) @property def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' return os.path.join(self.dummy_data_folder ,"""dummy_data.zip""" ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __SCREAMING_SNAKE_CASE = cached_path( lowerCamelCase ,cache_dir=self.cache_dir ,extract_compressed_file=lowerCamelCase ,force_extract=lowerCamelCase ) return os.path.join(lowerCamelCase ,self.dummy_file_name ) @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file ) @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' if self._bucket_url is None: __SCREAMING_SNAKE_CASE = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"""/""" ) ) return self._bucket_url @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep ,"""/""" ).split("""/""" )[:-1] ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Dict ,*lowerCamelCase : Any ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested __SCREAMING_SNAKE_CASE = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __SCREAMING_SNAKE_CASE = self.dummy_file_name # special case when data_url is a dict if isinstance(lowerCamelCase ,lowerCamelCase ): return self.create_dummy_data_dict(lowerCamelCase ,lowerCamelCase ) elif isinstance(lowerCamelCase ,(list, tuple) ): return self.create_dummy_data_list(lowerCamelCase ,lowerCamelCase ) else: return self.create_dummy_data_single(lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : str ,*lowerCamelCase : Any ): '''simple docstring''' return self.download_and_extract(lowerCamelCase ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.download_and_extract(lowerCamelCase ) def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Union[str, Any] ,*lowerCamelCase : Optional[int] ,**lowerCamelCase : Any ): '''simple docstring''' return path def UpperCAmelCase__ ( self : int ): '''simple docstring''' return {} def UpperCAmelCase__ ( self : str ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowerCamelCase ,lowerCamelCase ): for single_url in single_urls: download_callback(lowerCamelCase ) else: __SCREAMING_SNAKE_CASE = single_urls download_callback(lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = [os.path.join(lowerCamelCase ,urllib.parse.quote_plus(Path(lowerCamelCase ).name ) ) for x in single_urls] else: __SCREAMING_SNAKE_CASE = single_urls __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,urllib.parse.quote_plus(Path(lowerCamelCase ).name ) ) __SCREAMING_SNAKE_CASE = value # make sure that values are unique if all(isinstance(lowerCamelCase ,lowerCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __SCREAMING_SNAKE_CASE = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Any ,lowerCamelCase : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __SCREAMING_SNAKE_CASE = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" ,lowerCamelCase ) ) for url in data_url ) __SCREAMING_SNAKE_CASE = all( url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __SCREAMING_SNAKE_CASE = [data_url[0]] * len(lowerCamelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus 
__SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) ) dummy_data_list.append(lowerCamelCase ) return dummy_data_list def UpperCAmelCase__ ( self : str ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(lowerCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) ) if os.path.exists(lowerCamelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def UpperCAmelCase__ ( self : int ): '''simple docstring''' pass def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' pass def UpperCAmelCase__ ( self : int ,lowerCamelCase : Tuple ): '''simple docstring''' def _iter_archive_members(lowerCamelCase : Union[str, Any] ): # this preserves the order of the members inside the ZIP archive __SCREAMING_SNAKE_CASE = Path(self.dummy_file ).parent __SCREAMING_SNAKE_CASE = path.relative_to(lowerCamelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __SCREAMING_SNAKE_CASE = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowerCamelCase ) __SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) __SCREAMING_SNAKE_CASE = _iter_archive_members(lowerCamelCase ) if self.use_local_dummy_data else path.rglob("""*""" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ): yield file_path.relative_to(lowerCamelCase ).as_posix(), file_path.open("""rb""" ) def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Union[str, Any] ): '''simple docstring''' if not isinstance(lowerCamelCase ,lowerCamelCase ): __SCREAMING_SNAKE_CASE = [paths] for path in paths: if os.path.isfile(lowerCamelCase ): if os.path.basename(lowerCamelCase ).startswith((""".""", """__""") ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowerCamelCase ): if os.path.basename(lowerCamelCase ).startswith((""".""", """__""") ): continue dirnames.sort() for filename in sorted(lowerCamelCase ): if filename.startswith((""".""", """__""") ): continue yield os.path.join(lowerCamelCase ,lowerCamelCase )
13
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
13
1
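# Minimal sketch (`dummy_local_name` is a hypothetical name) of how the
# dummy-data manager above maps a real download URL onto a file name inside
# the dummy archive: the last path component, percent-encoded with quote_plus
# so query strings stay file-system safe.
import os
import urllib.parse
from pathlib import Path


def dummy_local_name(dummy_root, url):
    return os.path.join(dummy_root, urllib.parse.quote_plus(Path(url).name))


assert dummy_local_name("dummy_data", "https://host/data/train.csv?v=1") == os.path.join(
    "dummy_data", "train.csv%3Fv%3D1"
)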
from types import MethodType
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor with values in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor with values in {-1, 1}; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=bits)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step function that clamps the predicted x_0 to [-bit_scale, bit_scale]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of the DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Notation (<variable name> -> <name in paper>):
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" in formula (12) of https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. clip "predicted x_0" to the bit range
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in GLIDE
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12)
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_{t-1} without "random noise" of formula (12)
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator: https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type: str = "epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step function that clamps the predicted x_0 to [-bit_scale, bit_scale]."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise, also called
    # "predicted x_0" in formula (15) of https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. clip "predicted x_0" to the bit range
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) of https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. compute predicted previous sample µ_t
    # See formula (7) of https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.register_modules(unet=unet, scheduler=scheduler)
        # Patch the scheduler with the matching bit-clamping step variant; the
        # step functions read bit_scale from the scheduler instance (their self),
        # so the attribute is mirrored onto the scheduler as well.
        self.scheduler.bit_scale = bit_scale
        self.scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step,
            self.scheduler,
        )

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_{t-1}
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            # numpy_to_pil expects a (batch, height, width, channel) numpy array
            image = image.cpu().permute(0, 2, 3, 1).numpy()
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
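# A quick round-trip check for the bit codec above (a self-contained sketch,
# not part of the original pipeline): encoding to ±1 bit planes and decoding
# back recovers the image up to 8-bit quantization.
import torch

x = torch.rand(1, 3, 8, 8)           # image tensor in [0, 1), shape (b, c, h, w)
bits = decimal_to_bits(x)            # shape (1, 3 * 8, 8, 8), values in {-1, +1}
recovered = bits_to_decimal(bits)    # back to [0, 1]
assert torch.equal((x * 255).int(), (recovered * 255).round().int())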
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count that Google Scholar shows for the given lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    # the third anchor in the gs_fl row is the "Cited by N" link
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
from collections.abc import Callable, Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start: int, end: int, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function: Callable):
        self.collection = collection
        self.fn = function
        self.root = None
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i: int, val) -> None:
        """Set the element at index i to val in O(log n)."""
        self._update_tree(self.root, i, val)

    def query_range(self, i: int, j: int):
        """Combine the values on the closed interval [i, j] in O(log n)."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start: int, end: int) -> SegmentTreeNode:
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node: SegmentTreeNode, i: int, val) -> None:
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node: SegmentTreeNode, i: int, j: int):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in left child tree
                return self._query_range(node.left, i, j)
            # range spans left and right child trees
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in right child tree
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield the nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
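# A short usage sketch (assuming the public `transformers` import paths rather
# than this file's relative imports): build a config, override a few defaults,
# and inspect the dynamic axes an ONNX export would use.
from transformers import CamembertConfig
from transformers.models.camembert.configuration_camembert import CamembertOnnxConfig

config = CamembertConfig(num_hidden_layers=6, hidden_size=384)
assert config.model_type == "camembert"

onnx_config = CamembertOnnxConfig(config, task="default")
print(onnx_config.inputs)  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])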