Dataset schema (column, dtype, observed range):

- code: string, length 81 to 54k
- code_codestyle: int64, 0 to 721
- style_context: string, length 91 to 41.9k
- style_context_codestyle: int64, 0 to 699
- label: int64, 0 to 1
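The header reads like the column summary of a Hugging Face `datasets` table (name, dtype, observed min/max). Below is a minimal sketch of that schema in code, assuming the `datasets` library; nothing beyond the header above is implied:

from datasets import Features, Value

# Column types as given in the header; comments note the observed ranges.
features = Features(
    {
        "code": Value("string"),                    # length 81 to 54k
        "code_codestyle": Value("int64"),           # 0 to 721
        "style_context": Value("string"),           # length 91 to 41.9k
        "style_context_codestyle": Value("int64"),  # 0 to 699
        "label": Value("int64"),                    # 0 or 1
    }
)

Each row below prints these five fields in order, beginning with the first row's `code` sample:

code: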
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
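The `_LazyModule` indirection above defers the heavy torch imports until an attribute is first accessed. A minimal sketch of the idea (simplified, not the actual transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to their defining submodule on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value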
code_codestyle: 27

style_context:
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
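A quick round-trip check for the pair above (a sketch using the names as restored here):

# Ascii85-encoding then decoding recovers the original text.
message = "some text"
assert base85_decode(base85_encode(message)) == message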
style_context_codestyle: 27
label: 1

code:
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [c + s for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")

for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
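For reference, what `main()` above should print for the prefix "de", worked out by hand from the word list (treat it as an expected value, not a verified run):

print(autocomplete_using_trie("de"))
# ('depart', 'detergent', 'deer', 'deal')  -- the words starting with "de",
# in trie insertion order; "daring" and "dog" diverge before the "e".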
code_codestyle: 27

style_context:
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowerCamelCase ( lowerCamelCase : Optional[Any]): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def lowerCamelCase ( lowerCamelCase : str): # word like '180' or '身高' or '神' for char in word: A_ : Optional[Any] = ord(lowerCamelCase) if not _is_chinese_char(lowerCamelCase): return 0 return 1 def lowerCamelCase ( lowerCamelCase : List[str]): A_ : Any = set() for token in tokens: A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase) if chinese_word: word_set.add(lowerCamelCase) A_ : Any = list(lowerCamelCase) return word_list def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()): if not chinese_word_set: return bert_tokens A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set]) A_ : str = bert_tokens A_ , A_ : Any = 0, len(lowerCamelCase) while start < end: A_ : Tuple = True if is_chinese(bert_word[start]): A_ : List[str] = min(end - start , lowerCamelCase) for i in range(lowerCamelCase , 1 , -1): A_ : Tuple = """""".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1 , start + i): A_ : Dict = """##""" + bert_word[j] A_ : str = start + i A_ : Dict = False break if single_word: start += 1 return bert_word def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer): A_ : Union[str, Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws A_ : int = [get_chinese_word(lowerCamelCase) for r in res] ltp_res.extend(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : List[Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512) bert_res.extend(res["""input_ids"""]) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Union[str, Any] = [] for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase): A_ : List[Any] = [] for id in input_ids: A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase) input_tokens.append(lowerCamelCase) A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase) A_ : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCamelCase): if token[:2] == "##": A_ : Optional[Any] = token[2:] # save chinese tokens' pos if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)): ref_id.append(lowerCamelCase) ref_ids.append(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) return ref_ids def lowerCamelCase ( lowerCamelCase : Tuple): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""") as f: A_ : Optional[int] = f.readlines() A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device A_ : Dict = BertTokenizer.from_pretrained(args.bert) A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase) with open(args.save_path , """w""" , encoding="""utf-8""") as f: A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids] f.writelines(lowerCamelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) __magic_name__ = parser.parse_args() main(args)
style_context_codestyle: 27
label: 1

code:
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 27

style_context:
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """ViltImageProcessor""" a_ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ): '''simple docstring''' A_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a ,_a ) A_ : Optional[Any] = self.image_processor def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,): '''simple docstring''' A_ : int = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) # add pixel_values + pixel_mask A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a ) encoding.update(_a ) return encoding def _a ( self : List[Any] ,*_a : Any ,**_a : Any ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : int ,*_a : int ,**_a : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = self.tokenizer.model_input_names A_ : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a ( self : str ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,) return self.image_processor_class @property def _a ( self : int ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,) return self.image_processor
style_context_codestyle: 27
label: 1

code:
'''simple docstring''' import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin __magic_name__ = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class __lowerCAmelCase : '''simple docstring''' def __init__( self : Tuple ,_a : Optional[Any] ,_a : List[Any]=16 ,_a : int=13 ,_a : Dict=7 ,_a : Union[str, Any]=14 ,_a : Dict=10 ,_a : Union[str, Any]=19 ,_a : Dict=5 ,_a : Any=4 ,_a : Tuple=True ,_a : List[Any]=16 ,_a : Optional[int]=2 ,_a : Tuple=4 ,_a : Dict=4 ,_a : List[Any]="gelu" ,_a : Union[str, Any]=0.1 ,_a : str=0.1 ,_a : Any=[1, 2, 3, 4, 5] ,_a : Union[str, Any]=25 ,_a : List[Any]=5 ,): '''simple docstring''' A_ : Any = d_model A_ : Optional[Any] = parent A_ : Union[str, Any] = batch_size A_ : int = prediction_length A_ : str = context_length A_ : Dict = cardinality A_ : Optional[Any] = num_time_features A_ : Optional[int] = lags_sequence A_ : int = embedding_dimension A_ : int = is_training A_ : List[str] = hidden_size A_ : Dict = num_hidden_layers A_ : str = num_attention_heads A_ : Dict = intermediate_size A_ : int = hidden_act A_ : str = hidden_dropout_prob A_ : int = attention_probs_dropout_prob A_ : List[Any] = context_length A_ : Union[str, Any] = prediction_length + label_length A_ : int = label_length A_ : Dict = moving_average A_ : int = autocorrelation_factor def _a ( self : List[Any] ): '''simple docstring''' return AutoformerConfig( d_model=self.d_model ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,prediction_length=self.prediction_length ,context_length=self.context_length ,label_length=self.label_length ,lags_sequence=self.lags_sequence ,num_time_features=self.num_time_features ,num_static_categorical_features=1 ,cardinality=[self.cardinality] ,embedding_dimension=[self.embedding_dimension] ,moving_average=self.moving_average ,) def _a ( self : int ,_a : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = config.context_length + max(config.lags_sequence ) A_ : Optional[Any] = ids_tensor([self.batch_size, 1] ,config.cardinality[0] ) A_ : List[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) A_ : Any = floats_tensor([self.batch_size, _past_length] ) A_ : Optional[int] = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs A_ : Tuple = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) A_ : Dict = floats_tensor([self.batch_size, config.prediction_length] ) A_ : Any = { """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return 
inputs_dict def _a ( self : Dict ): '''simple docstring''' A_ : List[Any] = self.get_config() A_ : int = self.prepare_autoformer_inputs_dict(_a ) return config, inputs_dict def _a ( self : Dict ): '''simple docstring''' A_ , A_ : List[str] = self.prepare_config_and_inputs() return config, inputs_dict def _a ( self : Dict ,_a : Dict ,_a : Dict ): '''simple docstring''' A_ : Optional[Any] = AutoformerModel(config=_a ).to(_a ).eval() A_ : Optional[Any] = model(**_a ) A_ : Tuple = outputs.encoder_last_hidden_state A_ : Any = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: A_ : Optional[Any] = model.get_encoder() encoder.save_pretrained(_a ) A_ : Tuple = AutoformerEncoder.from_pretrained(_a ).to(_a ) A_ , A_ , A_ , A_ , A_ : List[Any] = model.create_network_inputs(**_a ) A_ , A_ : Any = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) A_ : str = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) ,dim=-1 ,) A_ : int = encoder(inputs_embeds=_a )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) A_ : List[str] = ( torch.mean(transformer_inputs[:, : config.context_length, ...] ,dim=1 ) .unsqueeze(1 ) .repeat(1 ,config.prediction_length ,1 ) ) A_ : Optional[int] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] ,device=enc_input.device ,) A_ : int = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) ,dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) ,dim=-1 ,) A_ : List[str] = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) ,dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) ,dim=-1 ,) with tempfile.TemporaryDirectory() as tmpdirname: A_ : Tuple = model.get_decoder() decoder.save_pretrained(_a ) A_ : Any = AutoformerDecoder.from_pretrained(_a ).to(_a ) A_ : Any = decoder( trend=_a ,inputs_embeds=_a ,encoder_hidden_states=_a ,)[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () a_ = (AutoformerForPrediction,) if is_torch_available() else () a_ = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {} a_ = False a_ = False a_ = False a_ = False a_ = False a_ = False def _a ( self : str ): '''simple docstring''' A_ : List[str] = AutoformerModelTester(self ) A_ : Tuple = ConfigTester(self ,config_class=_a ,has_text_modality=_a ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Dict ): '''simple docstring''' A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: A_ : List[str] = model_class(_a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_a ) A_ , A_ : Any = model_class.from_pretrained(_a ,output_loading_info=_a ) self.assertEqual(info["""missing_keys"""] ,[] ) def _a ( self : Any ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_a ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def _a ( self : List[str] ): '''simple docstring''' pass def _a ( 
self : List[Any] ): '''simple docstring''' A_ : int = inspect.signature(getattr(_a ,"""forward""" ) ) # The main input is the name of the argument after `self` A_ : str = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name ,_a ) def _a ( self : str ): '''simple docstring''' A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[Any] = model_class(_a ) A_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Union[str, Any] = [*signature.parameters.keys()] A_ : str = [ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(_a )] ,_a ) def _a ( self : Dict ): '''simple docstring''' A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() A_ : int = True A_ : str = getattr(self.model_tester ,"""seq_length""" ,_a ) A_ : Union[str, Any] = getattr(self.model_tester ,"""decoder_seq_length""" ,_a ) A_ : int = getattr(self.model_tester ,"""encoder_seq_length""" ,_a ) A_ : Union[str, Any] = getattr(self.model_tester ,"""d_model""" ,_a ) A_ : List[str] = getattr(self.model_tester ,"""num_attention_heads""" ,_a ) A_ : Tuple = d_model // num_attention_heads for model_class in self.all_model_classes: A_ : Dict = True A_ : int = False A_ : Optional[int] = True A_ : Any = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): A_ : Optional[Any] = model(**self._prepare_for_class(_a ,_a ) ) A_ : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A_ : Tuple = True A_ : List[Any] = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): A_ : Dict = model(**self._prepare_for_class(_a ,_a ) ) A_ : int = outputs.encoder_attentions self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,) A_ : List[Any] = len(_a ) A_ : Dict = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(_a ,_a ) # decoder attentions A_ : Any = outputs.decoder_attentions self.assertIsInstance(_a ,(list, tuple) ) self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,) # cross attentions A_ : Any = outputs.cross_attentions self.assertIsInstance(_a ,(list, tuple) ) self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) 
,[self.model_tester.num_attention_heads, decoder_seq_length, dim] ,) # Check attention is always last and order is fine A_ : int = True A_ : Dict = True A_ : Dict = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): A_ : Any = model(**self._prepare_for_class(_a ,_a ) ) self.assertEqual(out_len + 2 ,len(_a ) ) A_ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_a ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, dim] ,) @is_flaky() def _a ( self : str ): '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def lowerCamelCase ( lowerCamelCase : Tuple="train-batch.pt"): A_ : Optional[int] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=lowerCamelCase , repo_type="""dataset""") A_ : Optional[Any] = torch.load(lowerCamelCase , map_location=lowerCamelCase) return batch @require_torch @slow class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Any ): '''simple docstring''' A_ : Optional[int] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) A_ : Optional[int] = prepare_batch() with torch.no_grad(): A_ : Optional[Any] = model( past_values=batch["""past_values"""] ,past_time_features=batch["""past_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,static_categorical_features=batch["""static_categorical_features"""] ,future_values=batch["""future_values"""] ,future_time_features=batch["""future_time_features"""] ,)[0] A_ : int = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape ,_a ) A_ : Optional[Any] = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] ,device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] ,_a ,atol=_a ) ) def _a ( self : List[str] ): '''simple docstring''' A_ : str = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) A_ : Optional[Any] = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): A_ : Dict = model( past_values=batch["""past_values"""] ,past_time_features=batch["""past_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,static_categorical_features=batch["""static_categorical_features"""] ,).encoder_last_hidden_state A_ : List[str] = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape ,_a ) A_ : Union[str, Any] = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] ,device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] ,_a ,atol=_a ) ) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) A_ : str = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): A_ : Union[str, Any] = model.generate( static_categorical_features=batch["""static_categorical_features"""] ,past_time_features=batch["""past_time_features"""] ,past_values=batch["""past_values"""] ,future_time_features=batch["""future_time_features"""] ,past_observed_mask=batch["""past_observed_mask"""] ,) A_ : int = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape ,_a ) A_ : int = 
torch.tensor([3130.6763, 4056.5293, 7053.0786] ,device=_a ) A_ : Tuple = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] ,_a ,rtol=1e-1 ) )
code_codestyle: 27

style_context:
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
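For context, a small sketch of what this placeholder buys you when torch/torchsde are absent (the exact message text belongs to the library, so the comment below is only indicative):

# Instantiating the dummy fails fast with a clear hint about the missing
# backends, instead of an obscure ImportError deep inside the library.
try:
    DPMSolverSDEScheduler()
except ImportError as err:
    print(err)  # mentions that torch and torchsde are required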
style_context_codestyle: 27
label: 1

code:
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
code_codestyle: 27

style_context:
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"): A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {} A_ : Optional[int] = padding_side return tokenizer( [line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , ) def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ): A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,): '''simple docstring''' super().__init__() A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" ) A_ : Any = Path(_a ).joinpath(type_path + """.target""" ) A_ : Dict = self.get_char_lens(self.src_file ) A_ : Optional[int] = max_source_length A_ : List[str] = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' A_ : List[Any] = tokenizer A_ : Optional[Any] = prefix if n_obs is not None: A_ : Any = self.src_lens[:n_obs] A_ : Optional[int] = src_lang A_ : Tuple = tgt_lang def __len__( self : Tuple ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : List[str] ,_a : Tuple ): '''simple docstring''' A_ : int = index + 1 # linecache starts at 1 A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" ) A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_a ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right A_ : List[str] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer ) A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" ) A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" ) A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze() A_ : Dict = target_inputs["""input_ids"""].squeeze() A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _a ( _a : int ): '''simple docstring''' return [len(_a ) for x in Path(_a ).open().readlines()] def _a ( self : 
Optional[int] ,_a : Dict ): '''simple docstring''' A_ : str = torch.stack([x["""input_ids"""] for x in batch] ) A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] ) A_ : Union[str, Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : str = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : List[str] = trim_batch(_a ,_a ) A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a ) A_ : List[str] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __magic_name__ = getLogger(__name__) def lowerCamelCase ( lowerCamelCase : List[List]): return list(itertools.chain.from_iterable(lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : str): A_ : Union[str, Any] = get_git_info() save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json""")) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]): with open(lowerCamelCase , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase) def lowerCamelCase ( lowerCamelCase : Any): with open(lowerCamelCase) as f: return json.load(lowerCamelCase) def lowerCamelCase ( ): A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase) A_ : Union[str, Any] = { """repo_id""": str(lowerCamelCase), """repo_sha""": str(repo.head.object.hexsha), """repo_branch""": str(repo.active_branch), """hostname""": str(socket.gethostname()), } return repo_infos def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable): return list(map(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]): with open(lowerCamelCase , """wb""") as f: return pickle.dump(lowerCamelCase , lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str]): def remove_articles(lowerCamelCase : Any): return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase) def white_space_fix(lowerCamelCase : List[Any]): return " ".join(text.split()) def remove_punc(lowerCamelCase : Union[str, Any]): A_ : Optional[int] = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(lowerCamelCase : List[str]): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase)))) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int): A_ : Tuple = normalize_answer(lowerCamelCase).split() A_ : Dict = normalize_answer(lowerCamelCase).split() A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase) A_ : Any = sum(common.values()) if num_same == 0: return 0 A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any): return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]): assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Any = 0 for hypo, pred in zip(lowerCamelCase , lowerCamelCase): em += exact_match_score(lowerCamelCase , lowerCamelCase) if len(lowerCamelCase) > 0: em /= len(lowerCamelCase) return {"em": em} def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return 
model_prefix.startswith("""rag""") def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]): A_ : Optional[Any] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead A_ : Tuple = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase): if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) continue A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p] setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) return hparams, config
style_context_codestyle: 27
label: 1

code:
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict ,_a : Union[str, Any] ,_a : Optional[Any]=13 ,_a : List[str]=30 ,_a : List[str]=2 ,_a : Dict=3 ,_a : Tuple=True ,_a : Optional[int]=True ,_a : Any=32 ,_a : List[str]=5 ,_a : Optional[int]=4 ,_a : List[Any]=37 ,_a : int="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Union[str, Any]=10 ,_a : Tuple=0.02 ,): '''simple docstring''' A_ : Union[str, Any] = parent A_ : Optional[Any] = batch_size A_ : Optional[int] = image_size A_ : List[Any] = patch_size A_ : Optional[int] = num_channels A_ : List[str] = is_training A_ : Union[str, Any] = use_labels A_ : int = hidden_size A_ : Optional[int] = num_hidden_layers A_ : List[Any] = num_attention_heads A_ : Union[str, Any] = intermediate_size A_ : Optional[int] = hidden_act A_ : Tuple = hidden_dropout_prob A_ : Optional[Any] = attention_probs_dropout_prob A_ : int = type_sequence_label_size A_ : List[str] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ : Any = (image_size // patch_size) ** 2 A_ : Optional[Any] = num_patches + 1 def _a ( self : Tuple ): '''simple docstring''' A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : str = ViTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_a ,initializer_range=self.initializer_range ,) return config, pixel_values def _a ( self : Optional[int] ,_a : Tuple ,_a : Tuple ): '''simple docstring''' A_ : Any = FlaxViTModel(config=_a ) A_ : str = model(_a ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) A_ : str = (self.image_size, self.image_size) A_ : List[str] = (self.patch_size, self.patch_size) A_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) ) def _a ( self : List[Any] ,_a : Any ,_a : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.type_sequence_label_size A_ : Tuple = FlaxViTForImageClassification(config=_a ) A_ : int = model(_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : List[str] = 1 A_ : str = FlaxViTForImageClassification(_a ) A_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Any = model(_a ) def _a ( self : int ): '''simple docstring''' A_ : List[str] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ) : int = config_and_inputs A_ : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): 
'''simple docstring''' a_ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _a ( self : Dict ): '''simple docstring''' A_ : List[str] = FlaxViTModelTester(self ) A_ : str = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 ) def _a ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : List[Any] ): '''simple docstring''' A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[Any] = model_class(_a ) A_ : Optional[int] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[str] = [*signature.parameters.keys()] A_ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_a ) def _a ( self : List[str] ): '''simple docstring''' A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A_ : List[str] = self._prepare_for_class(_a ,_a ) A_ : Tuple = model_class(_a ) @jax.jit def model_jitted(_a : Dict ,**_a : Dict ): return model(pixel_values=_a ,**_a ) with self.subTest("""JIT Enabled""" ): A_ : List[str] = model_jitted(**_a ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): A_ : Union[str, Any] = model_jitted(**_a ).to_tuple() self.assertEqual(len(_a ) ,len(_a ) ) for jitted_output, output in zip(_a ,_a ): self.assertEqual(jitted_output.shape ,output.shape ) @slow def _a ( self : List[str] ): '''simple docstring''' for model_class_name in self.all_model_classes: A_ : int = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) A_ : str = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(_a )
code_codestyle: 27

style_context:
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 27
label: 1

code:
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
code_codestyle: 27

style_context:
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = KandinskyVaaControlnetPipeline a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a_ = False @property def _a ( self : Any ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return self.time_input_dim @property def _a ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def _a ( self : Optional[Any] ): '''simple docstring''' return 100 @property def _a ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) A_ : List[Any] = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } A_ : Tuple = UNetaDConditionModel(**_a ) return model @property def _a ( self : List[str] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _a ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) A_ : int = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : List[str] ): '''simple docstring''' A_ : Optional[Any] = self.dummy_unet A_ : int = self.dummy_movq A_ : Tuple = DDIMScheduler( num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,) A_ : int = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ): '''simple docstring''' A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) 
,rng=random.Random(_a ) ).to(_a ) A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( _a ) # create hint A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith("""mps""" ): A_ : Optional[Any] = torch.manual_seed(_a ) else: A_ : str = torch.Generator(device=_a ).manual_seed(_a ) A_ : List[Any] = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def _a ( self : Dict ): '''simple docstring''' A_ : List[Any] = """cpu""" A_ : List[str] = self.get_dummy_components() A_ : Tuple = self.pipeline_class(**_a ) A_ : Dict = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) ) A_ : Tuple = output.images A_ : Optional[Any] = pipe( **self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0] A_ : Tuple = image[0, -3:, -3:, -1] A_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : List[Any] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Any ): '''simple docstring''' A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) A_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0 A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(_a ) A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) A_ : Union[str, Any] = pipeline.to(_a ) pipeline.set_progress_bar_config(disable=_a ) A_ : Optional[Any] = """A robot, 4k photo""" A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ , A_ : List[str] = pipe_prior( _a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ : List[Any] = pipeline( image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,) A_ : Dict = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_a ,_a )
style_context_codestyle: 27
label: 1

code:
'''simple docstring''' from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """Pix2StructImageProcessor""" a_ = ("""T5Tokenizer""", """T5TokenizerFast""") def __init__( self : Union[str, Any] ,_a : List[str] ,_a : int ): '''simple docstring''' A_ : Tuple = False super().__init__(_a ,_a ) def __call__( self : Tuple ,_a : Tuple=None ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : Optional[int] = 2048 ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Dict ,): '''simple docstring''' if images is None and text is None: raise ValueError("""You have to specify either images or text.""" ) # Get only text if images is None and not self.image_processor.is_vqa: A_ : str = self.tokenizer A_ : List[Any] = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A_ : Optional[int] = self.image_processor( _a ,return_tensors=_a ,max_patches=_a ,**_a ) else: # add pixel_values and bbox A_ : List[str] = self.image_processor( _a ,return_tensors=_a ,max_patches=_a ,header_text=_a ,**_a ) if text is not None and not self.image_processor.is_vqa: A_ : Optional[Any] = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) if "attention_mask" in text_encoding: A_ : Union[str, Any] = text_encoding.pop("""attention_mask""" ) if "input_ids" in text_encoding: A_ : Tuple = text_encoding.pop("""input_ids""" ) else: A_ : Tuple = None if text_encoding is not None: encoding_image_processor.update(_a ) return encoding_image_processor def _a ( self : Optional[int] ,*_a : List[str] ,**_a : str ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : Tuple ,*_a : Optional[int] ,**_a : Union[str, Any] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : str ): '''simple docstring''' A_ : Optional[int] = self.tokenizer.model_input_names A_ : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
code_codestyle: 27

style_context:
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """deberta-v2""" def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,): '''simple docstring''' super().__init__(**_a ) A_ : Union[str, Any] = hidden_size A_ : Dict = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : List[Any] = intermediate_size A_ : List[Any] = hidden_act A_ : Optional[int] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : int = max_position_embeddings A_ : Any = type_vocab_size A_ : List[Any] = initializer_range A_ : int = relative_attention A_ : Tuple = max_relative_positions A_ : int = pad_token_id A_ : Tuple = position_biased_input # Backwards compatibility if type(_a ) == str: A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )] A_ : Any = pos_att_type A_ : Optional[int] = vocab_size A_ : Tuple = layer_norm_eps A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a ) A_ : Union[str, Any] = pooler_dropout A_ : List[Any] = pooler_hidden_act class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : Any ): '''simple docstring''' if self.task == "multiple-choice": A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A_ : Any = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def _a ( self : Optional[int] ): '''simple docstring''' return 12 def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,): '''simple docstring''' A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
27
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __magic_name__ = { 'configuration_blip': [ 'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlipConfig', 'BlipTextConfig', 'BlipVisionConfig', ], 'processing_blip': ['BlipProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['BlipImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlipModel', 'BlipPreTrainedModel', 'BlipForConditionalGeneration', 'BlipForQuestionAnswering', 'BlipVisionModel', 'BlipTextModel', 'BlipForImageTextRetrieval', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFBlipModel', 'TFBlipPreTrainedModel', 'TFBlipForConditionalGeneration', 'TFBlipForQuestionAnswering', 'TFBlipVisionModel', 'TFBlipTextModel', 'TFBlipForImageTextRetrieval', ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
27
1
'''simple docstring''' import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Tuple ,_a : Optional[int] ,_a : Optional[int]=13 ,_a : Dict=7 ,_a : Any=True ,_a : List[Any]=True ,_a : Dict=False ,_a : List[str]=True ,_a : List[str]=99 ,_a : List[str]=32 ,_a : Optional[int]=5 ,_a : str=4 ,_a : Dict=64 ,_a : Optional[Any]="gelu" ,_a : Tuple=0.1 ,_a : Union[str, Any]=0.1 ,_a : Dict=512 ,_a : Tuple=16 ,_a : str=2 ,_a : str=0.02 ,_a : Optional[int]=3 ,_a : Union[str, Any]=4 ,_a : List[Any]=None ,_a : int=2 ,_a : List[str]=2 ,_a : Tuple=2 ,_a : Dict=2 ,_a : str=4 ,_a : Optional[Any]=1 ,): '''simple docstring''' A_ : Optional[int] = parent A_ : int = batch_size A_ : str = seq_length A_ : Any = is_training A_ : Dict = use_input_mask A_ : int = use_token_type_ids A_ : Optional[Any] = use_labels A_ : str = vocab_size A_ : Optional[Any] = hidden_size A_ : Dict = num_hidden_layers A_ : List[str] = num_attention_heads A_ : Tuple = intermediate_size A_ : Any = hidden_act A_ : Tuple = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : Optional[int] = max_position_embeddings A_ : List[Any] = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : Optional[int] = initializer_range A_ : Any = num_labels A_ : Dict = num_choices A_ : List[Any] = scope A_ : List[str] = q_groups A_ : Optional[Any] = k_groups A_ : int = v_groups A_ : Optional[int] = post_attention_groups A_ : Any = intermediate_groups A_ : Optional[Any] = output_groups def _a ( self : List[str] ): '''simple docstring''' A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : Optional[int] = None if self.use_input_mask: A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Dict = None A_ : Union[str, Any] = None A_ : Any = None if self.use_labels: A_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Tuple = ids_tensor([self.batch_size] ,self.num_choices ) A_ : Union[str, Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Optional[Any] ): '''simple docstring''' return SqueezeBertConfig( embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups 
,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,) def _a ( self : List[Any] ,_a : List[Any] ,_a : List[str] ,_a : Union[str, Any] ,_a : str ,_a : str ,_a : Tuple ): '''simple docstring''' A_ : List[Any] = SqueezeBertModel(config=_a ) model.to(_a ) model.eval() A_ : Tuple = model(_a ,_a ) A_ : Any = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[Any] ,_a : List[Any] ,_a : int ,_a : Dict ,_a : Tuple ,_a : Dict ,_a : Union[str, Any] ): '''simple docstring''' A_ : Tuple = SqueezeBertForMaskedLM(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,attention_mask=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Any ,_a : int ,_a : str ,_a : str ,_a : List[str] ,_a : List[Any] ,_a : Tuple ): '''simple docstring''' A_ : Optional[int] = SqueezeBertForQuestionAnswering(config=_a ) model.to(_a ) model.eval() A_ : Dict = model( _a ,attention_mask=_a ,start_positions=_a ,end_positions=_a ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _a ( self : Tuple ,_a : Any ,_a : Union[str, Any] ,_a : int ,_a : List[Any] ,_a : int ,_a : int ): '''simple docstring''' A_ : Any = self.num_labels A_ : Union[str, Any] = SqueezeBertForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : List[Any] = model(_a ,attention_mask=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : str ,_a : str ,_a : Tuple ,_a : Union[str, Any] ,_a : Dict ,_a : str ,_a : Optional[int] ): '''simple docstring''' A_ : str = self.num_labels A_ : Dict = SqueezeBertForTokenClassification(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,attention_mask=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _a ( self : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,_a : int ,_a : Any ): '''simple docstring''' A_ : int = self.num_choices A_ : Optional[Any] = SqueezeBertForMultipleChoice(config=_a ) model.to(_a ) model.eval() A_ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A_ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A_ : List[str] = model( _a ,attention_mask=_a ,labels=_a ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Tuple = self.prepare_config_and_inputs() ((A_) , (A_) , (A_) , (A_) , (A_) , (A_)) : List[str] = config_and_inputs A_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) a_ = ( { """feature-extraction""": SqueezeBertModel, """fill-mask""": SqueezeBertForMaskedLM, """question-answering""": SqueezeBertForQuestionAnswering, """text-classification""": SqueezeBertForSequenceClassification, """token-classification""": 
SqueezeBertForTokenClassification, """zero-shot""": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) a_ = False a_ = True a_ = False def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = SqueezeBertModelTester(self ) A_ : int = ConfigTester(self ,config_class=_a ,dim=37 ) def _a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : List[Any] ): '''simple docstring''' A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_a ) def _a ( self : Dict ): '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_a ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_a ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_a ) def _a ( self : Dict ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_a ) @slow def _a ( self : str ): '''simple docstring''' for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = SqueezeBertModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_sentencepiece @require_tokenizers @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("""squeezebert/squeezebert-mnli""" ) A_ : List[str] = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) A_ : Union[str, Any] = model(_a )[0] A_ : int = torch.Size((1, 3) ) self.assertEqual(output.shape ,_a ) A_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(_a ,_a ,atol=1e-4 ) )
27
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
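A short instantiation sketch, assuming the config class is importable from transformers under its upstream name NezhaConfig (illustrative):

from transformers import NezhaConfig

config = NezhaConfig()               # BERT-base-sized defaults with a 21128-token Chinese vocab
print(config.model_type)             # "nezha"
print(config.max_relative_position)  # 64 -- the functional relative-position window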
27
1
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    if not numbers:
        return 0

    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
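A few worked cases for the routine above (function name as restored here):

assert max_product_subarray([2, 3, -2, 4]) == 6   # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0     # the zero resets both running products
assert max_product_subarray([-2, 3, -4]) == 24    # two negatives multiply back to a positive
assert max_product_subarray([]) == 0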
27
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set:
    """Iterative depth-first search; returns the set of vertices reachable from `start`."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
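Every vertex of the sample graph above is reachable from "A", so the traversal returns the whole vertex set; a minimal check:

assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}
# The LIFO stack discipline is what distinguishes this from BFS, which would use a FIFO queue.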
27
1
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Any): # load base model A_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase , torch_dtype=torch.floataa) # load LoRA weight from .safetensors A_ : int = load_file(lowerCamelCase) A_ : str = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A_ : int = key.split(""".""")[0].split(LORA_PREFIX_TEXT_ENCODER + """_""")[-1].split("""_""") A_ : Optional[Any] = pipeline.text_encoder else: A_ : Optional[Any] = key.split(""".""")[0].split(LORA_PREFIX_UNET + """_""")[-1].split("""_""") A_ : Dict = pipeline.unet # find the target layer A_ : List[str] = layer_infos.pop(0) while len(lowerCamelCase) > -1: try: A_ : int = curr_layer.__getattr__(lowerCamelCase) if len(lowerCamelCase) > 0: A_ : int = layer_infos.pop(0) elif len(lowerCamelCase) == 0: break except Exception: if len(lowerCamelCase) > 0: temp_name += "_" + layer_infos.pop(0) else: A_ : List[Any] = layer_infos.pop(0) A_ : Optional[Any] = [] if "lora_down" in key: pair_keys.append(key.replace("""lora_down""" , """lora_up""")) pair_keys.append(lowerCamelCase) else: pair_keys.append(lowerCamelCase) pair_keys.append(key.replace("""lora_up""" , """lora_down""")) # update weight if len(state_dict[pair_keys[0]].shape) == 4: A_ : Union[str, Any] = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.floataa) A_ : str = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.floataa) curr_layer.weight.data += alpha * torch.mm(lowerCamelCase , lowerCamelCase).unsqueeze(2).unsqueeze(3) else: A_ : int = state_dict[pair_keys[0]].to(torch.floataa) A_ : str = state_dict[pair_keys[1]].to(torch.floataa) curr_layer.weight.data += alpha * torch.mm(lowerCamelCase , lowerCamelCase) # update visited list for item in pair_keys: visited.append(lowerCamelCase) return pipeline if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument( '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.' ) parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors' ) parser.add_argument( '--lora_prefix_text_encoder', default='lora_te', type=str, help='The prefix of text encoder weight in safetensors', ) parser.add_argument('--alpha', default=0.7_5, type=float, help='The merging ratio in W = W0 + alpha * deltaW') parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.' ) parser.add_argument('--device', type=str, help='Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)') __magic_name__ = parser.parse_args() __magic_name__ = args.base_model_path __magic_name__ = args.checkpoint_path __magic_name__ = args.dump_path __magic_name__ = args.lora_prefix_unet __magic_name__ = args.lora_prefix_text_encoder __magic_name__ = args.alpha __magic_name__ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) __magic_name__ = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
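The heart of the script above is the in-place low-rank merge W <- W + alpha * (up @ down) applied per targeted layer (with an extra squeeze/unsqueeze for conv weights). A self-contained sketch of that merge for one linear weight; shapes and names are illustrative, not the script's:

import torch


def merge_lora_into_linear(
    weight: torch.Tensor, lora_up: torch.Tensor, lora_down: torch.Tensor, alpha: float = 0.75
) -> torch.Tensor:
    # weight: (out, in); lora_up: (out, r); lora_down: (r, in)
    return weight + alpha * lora_up @ lora_down


w = torch.randn(16, 32)
up, down = torch.randn(16, 4), torch.randn(4, 32)  # rank-4 adapter
merged = merge_lora_into_linear(w, up, down)
assert merged.shape == w.shape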
27
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) def lowerCamelCase ( lowerCamelCase : Dict): A_ : List[str] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: A_ : Union[str, Any] = [144, 192, 240] A_ : int = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: A_ : List[str] = [96, 120, 144] A_ : Any = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: A_ : Any = [64, 80, 96] A_ : List[str] = [16, 16, 24, 48, 64, 80, 320] A_ : Any = 0.05 A_ : List[Any] = 2.0 if mobilevit_name.startswith("""deeplabv3_"""): A_ : int = 512 A_ : Optional[int] = 16 A_ : List[Any] = 21 A_ : List[str] = """pascal-voc-id2label.json""" else: A_ : str = 1000 A_ : Any = """imagenet-1k-id2label.json""" A_ : Any = """huggingface/label-files""" A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r""")) A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()} A_ : Any = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False): for i in range(1 , 6): if F'layer_{i}.' in name: A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.') if "conv_1." in name: A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""") if ".block." in name: A_ : Optional[Any] = name.replace(""".block.""" , """.""") if "exp_1x1" in name: A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""") if "red_1x1" in name: A_ : int = name.replace("""red_1x1""" , """reduce_1x1""") if ".local_rep.conv_3x3." in name: A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""") if ".local_rep.conv_1x1." in name: A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""") if ".norm." in name: A_ : Tuple = name.replace(""".norm.""" , """.normalization.""") if ".conv." in name: A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""") if ".conv_proj." in name: A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""") for i in range(0 , 2): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.') for i in range(2 , 6): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.') if "expand_1x1" in name: A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""") if "conv_3x3" in name: A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""") if "reduce_1x1" in name: A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""") for i in range(2 , 5): if F'.global_rep.{i}.weight' in name: A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""") if F'.global_rep.{i}.bias' in name: A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""") if ".global_rep." in name: A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""") if ".pre_norm_mha.0." 
in name: A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""") if ".pre_norm_mha.1.out_proj." in name: A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""") if ".pre_norm_ffn.0." in name: A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""") if ".pre_norm_ffn.1." in name: A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""") if ".pre_norm_ffn.4." in name: A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""") if ".transformer." in name: A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""") if ".aspp_layer." in name: A_ : int = name.replace(""".aspp_layer.""" , """.""") if ".aspp_pool." in name: A_ : Tuple = name.replace(""".aspp_pool.""" , """.""") if "seg_head." in name: A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""") if "segmentation_head.classifier.classifier." in name: A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""") if "classifier.fc." in name: A_ : str = name.replace("""classifier.fc.""" , """classifier.""") elif (not base_model) and ("segmentation_head." not in name): A_ : str = """mobilevit.""" + name return name def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False): if base_model: A_ : Dict = """""" else: A_ : Any = """mobilevit.""" for key in orig_state_dict.copy().keys(): A_ : List[Any] = orig_state_dict.pop(lowerCamelCase) if key[:8] == "encoder.": A_ : int = key[8:] if "qkv" in key: A_ : Any = key.split(""".""") A_ : str = int(key_split[0][6:]) - 1 A_ : int = int(key_split[3]) A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}') A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size A_ : Optional[Any] = ( F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' 
) if "weight" in key: A_ : Dict = val[:dim, :] A_ : Optional[int] = val[dim : dim * 2, :] A_ : List[Any] = val[-dim:, :] else: A_ : Optional[Any] = val[:dim] A_ : List[Any] = val[dim : dim * 2] A_ : Any = val[-dim:] else: A_ : List[str] = val return orig_state_dict def lowerCamelCase ( ): A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw) return im @torch.no_grad() def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False): A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase) # load original state_dict A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""") # load 🤗 model if mobilevit_name.startswith("""deeplabv3_"""): A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval() else: A_ : str = MobileViTForImageClassification(lowerCamelCase).eval() A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase) model.load_state_dict(lowerCamelCase) # Check outputs on an image, prepared by MobileViTImageProcessor A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32) A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""") A_ : List[Any] = model(**lowerCamelCase) A_ : Dict = outputs.logits if mobilevit_name.startswith("""deeplabv3_"""): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": A_ : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xs": A_ : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xxs": A_ : Tuple = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241]) elif mobilevit_name == "mobilevit_xs": A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587]) elif mobilevit_name == "mobilevit_xxs": A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4) Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase) print(F'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase) if push_to_hub: A_ : str = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", 
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""") A_ : Union[str, Any] = model_mapping[mobilevit_name] image_processor.push_to_hub(lowerCamelCase , organization="""apple""") model.push_to_hub(lowerCamelCase , organization="""apple""") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __magic_name__ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
27
1
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
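A hand check for n = 10: the closed forms used above are sum(k^2) = n(n+1)(2n+1)/6 = 385 and sum(k) = n(n+1)/2 = 55, so the answer is 55^2 - 385 = 3025 - 385 = 2640.

assert solution(10) == 2640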
27
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __magic_name__ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""pixel_values"""] def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,): '''simple docstring''' super().__init__(**_a ) A_ : Tuple = size if size is not None else {"""shortest_edge""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" ) A_ : Any = do_resize A_ : List[str] = size A_ : Union[str, Any] = resample A_ : Dict = do_center_crop A_ : List[str] = crop_size A_ : Any = do_rescale A_ : Union[str, Any] = rescale_factor A_ : Any = do_normalize A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Tuple = do_convert_rgb def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,): '''simple docstring''' A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a ) return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a ) def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,): '''simple docstring''' A_ : Optional[int] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,): '''simple docstring''' return rescale(_a ,scale=_a ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,): '''simple docstring''' return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a ) def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,): '''simple docstring''' A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A_ : Tuple = size if size is not None else self.size A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a ) A_ : List[str] = resample if resample is not None else self.resample A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a ) A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Any = do_normalize if do_normalize is not None else self.do_normalize A_ : int = image_mean if image_mean is not None else self.image_mean A_ : int = image_std if image_std is not None else self.image_std A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : int = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : Optional[int] = [convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. A_ : Dict = [to_numpy_array(_a ) for image in images] if do_resize: A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images] if do_center_crop: A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images] if do_normalize: A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images] A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images] A_ : List[str] = {"""pixel_values""": images} return BatchFeature(data=_a ,tensor_type=_a )
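A minimal usage sketch for an image processor with this interface; the class and constructor arguments follow the upstream CLIPImageProcessor API, which this file mirrors (treat the concrete names as assumptions):

from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
image = Image.new("RGB", (640, 480))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) after resize + center crop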
27
1
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __magic_name__ = trt.Logger(trt.Logger.WARNING) __magic_name__ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __magic_name__ = logging.getLogger(__name__) __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __magic_name__ = parser.parse_args() if args.tokenizer_name: __magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __magic_name__ = args.per_device_eval_batch_size __magic_name__ = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __magic_name__ = True __magic_name__ = 'temp_engine/bert-fp32.engine' if args.fpaa: __magic_name__ = 'temp_engine/bert-fp16.engine' if args.inta: __magic_name__ = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __magic_name__ = [network.get_input(i) for i in range(network.num_inputs)] __magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __magic_name__ = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __magic_name__ = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __magic_name__ = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]): A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa) A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa) A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase) # start time A_ : List[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Synchronize the stream and take time stream.synchronize() # end time A_ : str = time.time() A_ : 
Tuple = end_time - start_time A_ : Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __magic_name__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __magic_name__ = raw_datasets['validation'].column_names __magic_name__ = 'question' if 'question' in column_names else column_names[0] __magic_name__ = 'context' if 'context' in column_names else column_names[1] __magic_name__ = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __magic_name__ = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __magic_name__ = min(args.max_seq_length, tokenizer.model_max_length) def lowerCamelCase ( lowerCamelCase : Dict): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. 
A_ : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A_ : Union[str, Any] = [] for i in range(len(tokenized_examples["""input_ids"""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase) A_ : Tuple = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A_ : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. A_ : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i]) ] return tokenized_examples __magic_name__ = raw_datasets['validation'] # Validation Feature Creation __magic_name__ = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __magic_name__ = default_data_collator __magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __magic_name__ = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. A_ : Tuple = postprocess_qa_predictions( examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: A_ : Dict = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase) __magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! 
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize # Allocate device memory for inputs and outputs. __magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __magic_name__ = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f""" Num examples = {len(eval_dataset)}""") logger.info(f""" Batch size = {args.per_device_eval_batch_size}""") __magic_name__ = 0.0 __magic_name__ = 0 __magic_name__ = timeit.default_timer() __magic_name__ = None for step, batch in enumerate(eval_dataloader): __magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __magic_name__ , __magic_name__ = outputs __magic_name__ = torch.tensor(start_logits) __magic_name__ = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __magic_name__ = nested_truncate(all_preds, len(eval_dataset)) __magic_name__ = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds) __magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"""Evaluation metrics: {eval_metric}""")
27
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
27
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
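In Song et al.'s notation, step_pred above integrates one Euler-Maruyama step of the reverse-time variance-preserving SDE; schematically (a sketch of the update the code performs, with s the rescaled model score):

beta(t) = beta_min + t * (beta_max - beta_min)
drift   = -1/2 * beta(t) * x  -  beta(t) * s(x, t)
x_mean  = x + drift * dt                          # dt = -1 / num_inference_steps
x       = x_mean + sqrt(beta(t)) * sqrt(-dt) * z  # z ~ N(0, I)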
27
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (a sympy-parsable string) by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (value of pi)
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find fourth root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}")
    # Find value of e
    print(f"The root of log(y) - 1 = 0 is {newton_raphson('log(y) - 1', 2, variable='y')}")
    # Exponential roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 10, precision=0.005)}")
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
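The update rule implemented above is x_{n+1} = x_n - m * f(x_n) / f'(x_n), where m is the root's multiplicity (m = 1 recovers plain Newton-Raphson). A quick check (printed value approximate):

# Newton's method on x**2 - 2 = 0 converges to sqrt(2) from x0 = 1.
root = newton_raphson("x**2 - 2", 1.0)
print(root)  # ~1.4142135623730951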
27
1
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through the quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
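A round-trip sketch for the model above under its default config (an assumption-laden sketch, not library test code; the printed shapes assume the single default block keeps the 32x32 spatial size):

import torch

model = VQModel()  # defaults: 3 -> 3 channels, one 64-channel block, 256 codebook entries
sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(sample).latents
    reconstruction = model.decode(latents).sample
print(latents.shape, reconstruction.shape)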
27
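# A rough numpy sketch of the nearest-codebook lookup that the VectorQuantizer in the
# VQ model above performs; shapes and names are illustrative and this is not the
# diffusers implementation (no commitment loss, no straight-through gradient).
import numpy as np


def quantize_sketch(latents, codebook):
    # latents: (n, d) continuous vectors; codebook: (k, d) learned code vectors.
    distances = ((latents[:, None, :] - codebook[None, :, :]) ** 2).sum(axis=-1)
    indices = distances.argmin(axis=1)  # index of the nearest code per latent
    return codebook[indices], indices   # quantized vectors and their code ids


rng = np.random.default_rng(0)
quantized, ids = quantize_sketch(rng.normal(size=(4, 8)), rng.normal(size=(16, 8)))
print(ids.shape, quantized.shape)  # (4,) (4, 8)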
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset __magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Dict ,_a : Dict ): '''simple docstring''' super().__init__() A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a ) A_ : int = list(model.children() )[:-2] A_ : int = nn.Sequential(*_a ) A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _a ( self : str ,_a : Optional[int] ): '''simple docstring''' A_ : Tuple = self.pool(self.model(_a ) ) A_ : Any = torch.flatten(_a ,start_dim=2 ) A_ : str = out.transpose(1 ,2 ).contiguous() return out # BxNx2048 class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ): '''simple docstring''' A_ : Dict = [json.loads(_a ) for l in open(_a )] A_ : Optional[int] = os.path.dirname(_a ) A_ : Optional[Any] = tokenizer A_ : Optional[Any] = labels A_ : List[Any] = len(_a ) A_ : str = max_seq_length A_ : str = transforms def __len__( self : str ): '''simple docstring''' return len(self.data ) def __getitem__( self : Tuple ,_a : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) ) A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1] A_ : Optional[int] = sentence[: self.max_seq_length] A_ : Any = torch.zeros(self.n_classes ) A_ : Tuple = 1 A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" ) A_ : Union[str, Any] = self.transforms(_a ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _a ( self : List[Any] ): '''simple docstring''' A_ : str = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( lowerCamelCase : str): A_ : List[Any] = [len(row["""sentence"""]) for row in batch] A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase) A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long) A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long) for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)): A_ : str = input_row["""sentence"""] A_ : Tuple = 1 A_ : int = torch.stack([row["""image"""] for row in batch]) A_ : str = torch.stack([row["""label"""] for row in batch]) A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch]) A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch]) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ): return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ): return transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 
0.1214_5835, 0.1438_0469] , ), ])
27
1
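# A minimal sketch of the padding pattern in the collate function above: variable-length
# id sequences are copied into a zero tensor and a parallel mask marks the real tokens.
# Function and variable names are illustrative.
import torch


def pad_batch_sketch(sequences):
    # sequences: list of 1-D LongTensors of differing lengths.
    lengths = [len(seq) for seq in sequences]
    batch = torch.zeros(len(sequences), max(lengths), dtype=torch.long)
    mask = torch.zeros_like(batch)
    for i, (seq, length) in enumerate(zip(sequences, lengths)):
        batch[i, :length] = seq  # copy the real tokens
        mask[i, :length] = 1     # 1 on real tokens, 0 on padding
    return batch, mask


ids, attention = pad_batch_sketch([torch.tensor([5, 6, 7]), torch.tensor([8])])
print(ids.tolist(), attention.tolist())  # [[5, 6, 7], [8, 0, 0]] [[1, 1, 1], [1, 0, 0]]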
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __magic_name__ = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
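# The _LazyModule machinery above defers heavy imports until a symbol is first touched.
# A generic sketch of the same idea using only the standard library, via the
# module-level __getattr__ hook (PEP 562); it belongs in a package __init__.py, and the
# submodule and symbol names here are hypothetical.
import importlib

_LAZY_SYMBOLS = {"modeling": ["MyModel"], "tokenization": ["MyTokenizer"]}


def __getattr__(name):
    # Called only when `name` is not found normally, so imports happen on first access.
    for submodule, symbols in _LAZY_SYMBOLS.items():
        if name in symbols:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")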
'''simple docstring''' from __future__ import annotations import math def lowerCamelCase ( lowerCamelCase : int): if num <= 0: A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.' raise ValueError(lowerCamelCase) A_ : str = [True] * (num + 1) A_ : Tuple = [] A_ : str = 2 A_ : Any = int(math.sqrt(lowerCamelCase)) while start <= end: # If start is a prime if sieve[start] is True: prime.append(lowerCamelCase) # Set multiples of start be False for i in range(start * start , num + 1 , lowerCamelCase): if sieve[i] is True: A_ : Union[str, Any] = False start += 1 for j in range(end + 1 , num + 1): if sieve[j] is True: prime.append(lowerCamelCase) return prime if __name__ == "__main__": print(prime_sieve(int(input('Enter a positive integer: ').strip())))
27
1
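# A quick trial-division oracle for cross-checking the sieve above on small inputs;
# the helper name is illustrative. The sieve's output for num=100 should equal this list.
def is_prime_naive(n):
    # O(sqrt(n)) trial division, fine as a test oracle for small n.
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))


expected = [n for n in range(2, 101) if is_prime_naive(n)]
print(expected[:10])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]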
'''simple docstring''' import numpy as np def lowerCamelCase ( lowerCamelCase : np.ndarray , lowerCamelCase : np.ndarray , lowerCamelCase : float = 1E-12 , lowerCamelCase : int = 100 , ): assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[1] # Ensure proper dimensionality. assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(lowerCamelCase) == np.iscomplexobj(lowerCamelCase) A_ : Tuple = np.iscomplexobj(lowerCamelCase) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(lowerCamelCase , input_matrix.conj().T) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. A_ : int = False A_ : int = 0 A_ : Tuple = 0 A_ : Dict = 1E12 while not convergence: # Multiple matrix by the vector. A_ : List[str] = np.dot(lowerCamelCase , lowerCamelCase) # Normalize the resulting output vector. A_ : str = w / np.linalg.norm(lowerCamelCase) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) A_ : Optional[int] = vector.conj().T if is_complex else vector.T A_ : List[Any] = np.dot(lowerCamelCase , np.dot(lowerCamelCase , lowerCamelCase)) # Check convergence. A_ : Any = np.abs(lambda_ - lambda_previous) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: A_ : str = True A_ : str = lambda_ if is_complex: A_ : List[Any] = np.real(lambda_) return lambda_, vector def lowerCamelCase ( ): A_ : List[Any] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]]) A_ : int = np.array([41, 4, 20]) A_ : Optional[int] = real_input_matrix.astype(np.complexaaa) A_ : int = np.triu(1j * complex_input_matrix , 1) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T A_ : List[str] = np.array([41, 4, 20]).astype(np.complexaaa) for problem_type in ["real", "complex"]: if problem_type == "real": A_ : int = real_input_matrix A_ : str = real_vector elif problem_type == "complex": A_ : List[str] = complex_input_matrix A_ : List[str] = complex_vector # Our implementation. A_ , A_ : Optional[int] = power_iteration(lowerCamelCase , lowerCamelCase) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). A_ , A_ : Union[str, Any] = np.linalg.eigh(lowerCamelCase) # Last eigenvalue is the maximum one. A_ : Tuple = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. A_ : Any = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(lowerCamelCase) - np.abs(lowerCamelCase)) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
27
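# A small numpy sketch of why the power iteration above converges: for a symmetric
# matrix the Rayleigh-quotient error contracts roughly by (lambda_2 / lambda_1) ** 2
# per step. The 2x2 matrix is illustrative.
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])  # eigenvalues ~4.618 and ~2.382
v = np.array([1.0, 0.0])
lambda_max = max(np.linalg.eigvalsh(A))
for step in range(1, 6):
    v = A @ v
    v /= np.linalg.norm(v)          # keep the iterate normalized
    rayleigh = v @ A @ v            # Rayleigh-quotient estimate of lambda_max
    print(step, abs(rayleigh - lambda_max))  # error shrinks geometrically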
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __magic_name__ = trt.Logger(trt.Logger.WARNING) __magic_name__ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __magic_name__ = logging.getLogger(__name__) __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __magic_name__ = parser.parse_args() if args.tokenizer_name: __magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __magic_name__ = args.per_device_eval_batch_size __magic_name__ = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __magic_name__ = True __magic_name__ = 'temp_engine/bert-fp32.engine' if args.fpaa: __magic_name__ = 'temp_engine/bert-fp16.engine' if args.inta: __magic_name__ = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __magic_name__ = [network.get_input(i) for i in range(network.num_inputs)] __magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __magic_name__ = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __magic_name__ = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __magic_name__ = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]): A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa) A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa) A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase) # start time A_ : List[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Synchronize the stream and take time stream.synchronize() # end time A_ : str = time.time() A_ : 
Tuple = end_time - start_time A_ : Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __magic_name__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __magic_name__ = raw_datasets['validation'].column_names __magic_name__ = 'question' if 'question' in column_names else column_names[0] __magic_name__ = 'context' if 'context' in column_names else column_names[1] __magic_name__ = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __magic_name__ = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __magic_name__ = min(args.max_seq_length, tokenizer.model_max_length) def lowerCamelCase ( lowerCamelCase : Dict): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. 
A_ : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A_ : Union[str, Any] = [] for i in range(len(tokenized_examples["""input_ids"""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase) A_ : Tuple = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A_ : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. A_ : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i]) ] return tokenized_examples __magic_name__ = raw_datasets['validation'] # Validation Feature Creation __magic_name__ = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __magic_name__ = default_data_collator __magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __magic_name__ = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. A_ : Tuple = postprocess_qa_predictions( examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: A_ : Dict = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase) __magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! 
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize # Allocate device memory for inputs and outputs. __magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __magic_name__ = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f""" Num examples = {len(eval_dataset)}""") logger.info(f""" Batch size = {args.per_device_eval_batch_size}""") __magic_name__ = 0.0 __magic_name__ = 0 __magic_name__ = timeit.default_timer() __magic_name__ = None for step, batch in enumerate(eval_dataloader): __magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __magic_name__ , __magic_name__ = outputs __magic_name__ = torch.tensor(start_logits) __magic_name__ = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __magic_name__ = nested_truncate(all_preds, len(eval_dataset)) __magic_name__ = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds) __magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"""Evaluation metrics: {eval_metric}""")
27
1
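# The model_infer function above interleaves pinned-memory copies with stream-ordered
# execution. A minimal pycuda sketch of that host -> device -> host round trip, with
# the engine execution elided; this assumes a CUDA-capable GPU and pycuda installed.
import numpy as np
import pycuda.autoinit  # noqa: F401  (creates a default CUDA context)
import pycuda.driver as cuda

host_in = cuda.pagelocked_empty((4,), dtype=np.float32)   # pinned memory for async DMA
host_in[:] = [1.0, 2.0, 3.0, 4.0]
host_out = cuda.pagelocked_empty((4,), dtype=np.float32)
device_buf = cuda.mem_alloc(host_in.nbytes)

stream = cuda.Stream()
cuda.memcpy_htod_async(device_buf, host_in, stream)   # host -> device
# ... a kernel or TensorRT engine execution would be enqueued on `stream` here ...
cuda.memcpy_dtoh_async(host_out, device_buf, stream)  # device -> host
stream.synchronize()                                  # wait for all queued work
print(host_out)  # [1. 2. 3. 4.]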
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __magic_name__ = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } __magic_name__ = {'facebook/blenderbot-3B': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCamelCase ( ): A_ : int = ( list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1)) ) A_ : Dict = bs[:] A_ : Tuple = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase) cs.append(2**8 + n) n += 1 A_ : Optional[Any] = [chr(lowerCamelCase) for n in cs] return dict(zip(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : List[str]): A_ : Any = set() A_ : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char)) A_ : Tuple = char return pairs class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] def __init__( self : Union[str, Any] ,_a : Tuple ,_a : Dict ,_a : List[str]="replace" ,_a : List[str]="<s>" ,_a : int="</s>" ,_a : Tuple="</s>" ,_a : List[Any]="<s>" ,_a : Union[str, Any]="<unk>" ,_a : List[str]="<pad>" ,_a : Optional[int]="<mask>" ,_a : Optional[Any]=False ,**_a : List[str] ,): '''simple docstring''' A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : List[str] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token super().__init__( errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,) with open(_a ,encoding="""utf-8""" ) as vocab_handle: A_ : Any = json.load(_a ) A_ : str = {v: k for k, v in self.encoder.items()} A_ : Union[str, Any] = errors # how to handle errors in decoding A_ : Any = bytes_to_unicode() A_ : int = {v: k for k, v in self.byte_encoder.items()} with open(_a ,encoding="""utf-8""" ) as merges_handle: A_ : int = merges_handle.read().split("""\n""" )[1:-1] A_ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] A_ : Union[str, Any] = dict(zip(_a ,range(len(_a ) ) ) ) A_ : Any = {} A_ : int = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A_ : int = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _a ( self : Tuple ): '''simple docstring''' return len(self.encoder ) def _a ( self : Dict ): '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def _a ( self : Optional[Any] ,_a : Any ): '''simple docstring''' if token in self.cache: return self.cache[token] A_ : Optional[int] = tuple(_a ) A_ : int = get_pairs(_a ) if not pairs: return token while True: A_ : List[str] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A_ , A_ : Dict = bigram A_ : Optional[Any] = [] A_ : Dict = 0 while i < len(_a ): try: A_ : int = word.index(_a ,_a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A_ : Any = j if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A_ : Union[str, Any] = tuple(_a ) A_ : Optional[Any] = new_word if len(_a ) == 1: break else: A_ : str = get_pairs(_a ) A_ : List[Any] = """ """.join(_a ) A_ : str = word return word def _a ( self : Optional[Any] ,_a : List[str] ): '''simple docstring''' A_ : Union[str, Any] = [] for token in re.findall(self.pat ,_a ): A_ : str = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) ) return bpe_tokens def _a ( self : str ,_a : List[str] ): '''simple docstring''' return self.encoder.get(_a ,self.encoder.get(self.unk_token ) ) def _a ( self : Any ,_a : List[Any] ): '''simple docstring''' return self.decoder.get(_a ) def _a ( self : Union[str, Any] ,_a : Dict ): '''simple docstring''' A_ : int = """""".join(_a ) A_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def _a ( self : Tuple ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : Union[str, Any] = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : Dict = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) 
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" ) A_ : List[Any] = 0 with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) A_ : Tuple = token_index writer.write(""" """.join(_a ) + """\n""" ) index += 1 return vocab_file, merge_file def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1] def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Optional[int] = [self.sep_token_id] A_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any]=False ,**_a : Any ): '''simple docstring''' A_ : Optional[Any] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()): A_ : List[str] = """ """ + text return (text, kwargs) def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' return token_ids_a + [self.eos_token_id] def _a ( self : List[str] ,_a : "Conversation" ): '''simple docstring''' A_ : Tuple = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text ) else: # Generated responses should contain them already. inputs.append(_a ) A_ : List[Any] = """ """.join(_a ) A_ : List[str] = self.encode(_a ) if len(_a ) > self.model_max_length: A_ : List[Any] = input_ids[-self.model_max_length :] logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' ) return input_ids
27
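# A plain-Python sketch of one merge step of the byte-pair-encoding loop in the `bpe`
# method above: find the highest-priority adjacent pair and fuse every occurrence.
# The toy word and merge ranks are illustrative.
def bpe_step_sketch(word, merge_rank):
    # word: tuple of symbols; merge_rank: dict mapping a symbol pair to its priority.
    if len(word) < 2:
        return word
    pairs = {(a, b) for a, b in zip(word, word[1:])}
    best = min(pairs, key=lambda pair: merge_rank.get(pair, float("inf")))
    if best not in merge_rank:
        return word  # no known merge applies, the word is final
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == best:
            merged.append(word[i] + word[i + 1])  # fuse the best-ranked pair
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)


print(bpe_step_sketch(("l", "o", "w", "e", "r"), {("l", "o"): 0, ("e", "r"): 1}))
# ('lo', 'w', 'e', 'r') -- repeating until no ranked pair remains yields the BPE tokens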
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __magic_name__ = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['ConvNextFeatureExtractor'] __magic_name__ = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
27
1
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = BioGptTokenizer a_ = False def _a ( self : Tuple ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ : int = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] A_ : Any = dict(zip(_a ,range(len(_a ) ) ) ) A_ : List[Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] A_ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ) as fp: fp.write(json.dumps(_a ) ) with open(self.merges_file ,"""w""" ) as fp: fp.write("""\n""".join(_a ) ) def _a ( self : Optional[int] ,_a : Union[str, Any] ): '''simple docstring''' A_ : int = """lower newer""" A_ : Any = """lower newer""" return input_text, output_text def _a ( self : int ): '''simple docstring''' A_ : Optional[Any] = BioGptTokenizer(self.vocab_file ,self.merges_file ) A_ : Optional[Any] = """lower""" A_ : Union[str, Any] = ["""low""", """er</w>"""] A_ : Union[str, Any] = tokenizer.tokenize(_a ) self.assertListEqual(_a ,_a ) A_ : List[str] = tokens + ["""<unk>"""] A_ : Optional[int] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,_a ) @slow def _a ( self : Dict ): '''simple docstring''' A_ : List[str] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) A_ : List[str] = tokenizer.encode("""sequence builders""" ,add_special_tokens=_a ) A_ : Tuple = tokenizer.encode("""multi-sequence build""" ,add_special_tokens=_a ) A_ : str = tokenizer.build_inputs_with_special_tokens(_a ) A_ : Tuple = tokenizer.build_inputs_with_special_tokens(_a ,_a ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
27
'''simple docstring''' import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_text_model""" def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,): '''simple docstring''' super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ) A_ : Tuple = vocab_size A_ : int = hidden_size A_ : Optional[int] = intermediate_size A_ : Optional[int] = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : int = max_position_embeddings A_ : str = hidden_act A_ : Union[str, Any] = layer_norm_eps A_ : Tuple = attention_dropout A_ : Union[str, Any] = initializer_range A_ : List[Any] = initializer_factor @classmethod def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : int = cls.get_config_dict(_a ,**_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_vision_model""" def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,): '''simple docstring''' super().__init__(**_a ) A_ : List[str] = hidden_size A_ : Union[str, Any] = intermediate_size A_ : Union[str, Any] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : int = num_channels A_ : str = image_size A_ : List[Any] = patch_size A_ : int = hidden_act A_ : List[Any] = layer_norm_eps A_ : List[str] = attention_dropout A_ : str = initializer_range A_ : str = initializer_factor @classmethod def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit""" a_ = True def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,): '''simple docstring''' super().__init__(**_a ) if text_config is None: A_ : List[Any] = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) A_ : Dict = OwlViTTextConfig(**_a ) A_ : Dict = OwlViTVisionConfig(**_a ) A_ : Any = projection_dim A_ : Optional[int] = logit_scale_init_value A_ : Optional[int] = return_dict A_ : Dict = 1.0 @classmethod def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a ) if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) @classmethod def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ): '''simple docstring''' A_ : str = {} A_ : int = text_config A_ : Union[str, Any] = vision_config return cls.from_dict(_a ,**_a ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : Optional[int] = self.vision_config.to_dict() A_ : List[Any] = self.__class__.model_type return output class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : int ): '''simple docstring''' return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _a ( self : str ): '''simple docstring''' return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _a ( self : Optional[Any] ): '''simple docstring''' return 1e-4 def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,): '''simple docstring''' A_ : Any = super().generate_dummy_inputs( processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a ) A_ : Any = super().generate_dummy_inputs( processor.image_processor ,batch_size=_a ,framework=_a ) return {**text_input_dict, **image_input_dict} @property def _a ( self : Optional[Any] ): '''simple docstring''' return 14
27
1
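# A hedged dataclass sketch of the composition pattern in the OwlViT config above: one
# top-level config owning a text and a vision sub-config, each serializable through
# to_dict. Class names and field values are illustrative, not the transformers API.
from dataclasses import asdict, dataclass, field


@dataclass
class TextConfigSketch:
    vocab_size: int = 49408
    hidden_size: int = 512


@dataclass
class VisionConfigSketch:
    image_size: int = 768
    patch_size: int = 32


@dataclass
class CompositeConfigSketch:
    text_config: TextConfigSketch = field(default_factory=TextConfigSketch)
    vision_config: VisionConfigSketch = field(default_factory=VisionConfigSketch)
    projection_dim: int = 512

    def to_dict(self):
        # asdict recurses, nesting the sub-configs much as the config's to_dict does.
        return asdict(self)


print(CompositeConfigSketch().to_dict()["vision_config"]["patch_size"])  # 32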
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __magic_name__ = 16 __magic_name__ = 32 def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16 , lowerCamelCase : str = "bert-base-cased"): A_ : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase) A_ : Tuple = load_dataset("""glue""" , """mrpc""") def tokenize_function(lowerCamelCase : Any): # max_length=None => use the model max length (it's actually the default) A_ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A_ : str = datasets.map( lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : List[Any] = tokenized_datasets.rename_column("""label""" , """labels""") def collate_fn(lowerCamelCase : str): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""") return tokenizer.pad(lowerCamelCase , padding="""longest""" , return_tensors="""pt""") # Instantiate dataloaders. A_ : List[str] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase) A_ : Dict = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase) return train_dataloader, eval_dataloader def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Tuple): model.eval() A_ : Union[str, Any] = 0 for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): A_ : Union[str, Any] = model(**lowerCamelCase) A_ : Optional[int] = outputs.logits.argmax(dim=-1) # It is slightly faster to call this once, than multiple times A_ , A_ : Tuple = accelerator.gather( (predictions, batch["""labels"""])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowerCamelCase) - 1: A_ : int = predictions[: len(eval_dataloader.dataset) - samples_seen] A_ : List[str] = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowerCamelCase , references=lowerCamelCase , ) A_ : str = metric.compute() return eval_metric["accuracy"] def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str): # Initialize accelerator A_ : Optional[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : List[str] = config["""lr"""] A_ : Optional[int] = int(config["""num_epochs"""]) A_ : Optional[Any] = int(config["""seed"""]) A_ : str = int(config["""batch_size"""]) A_ : Any = args.model_name_or_path set_seed(lowerCamelCase) A_ , A_ : Optional[Any] = get_dataloaders(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : str = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase , return_dict=lowerCamelCase) # Instantiate optimizer A_ : Union[str, Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A_ : Any = optimizer_cls(params=model.parameters() , lr=lowerCamelCase) if accelerator.state.deepspeed_plugin is not None: A_ : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: A_ : Dict = 1 A_ : Optional[int] = (len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A_ : Any = get_linear_schedule_with_warmup( optimizer=lowerCamelCase , num_warmup_steps=0 , num_training_steps=lowerCamelCase , ) else: A_ : Optional[Any] = DummyScheduler(lowerCamelCase , total_num_steps=lowerCamelCase , warmup_num_steps=0) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
A_ , A_ , A_ , A_ , A_ : Optional[Any] = accelerator.prepare( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) # We need to keep track of how many total steps we have iterated over A_ : List[str] = 0 # We also need to keep track of the stating epoch so files are named properly A_ : Dict = 0 A_ : Optional[Any] = evaluate.load("""glue""" , """mrpc""") A_ : str = num_epochs if args.partial_train_epoch is not None: A_ : Optional[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint) A_ : List[str] = args.resume_from_checkpoint.split("""epoch_""")[1] A_ : Optional[Any] = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break A_ : Optional[Any] = int(lowerCamelCase) + 1 A_ : Dict = evaluation_loop(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) accelerator.print("""resumed checkpoint performance:""" , lowerCamelCase) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0]) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""]) with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json') , """r""") as f: A_ : Union[str, Any] = json.load(lowerCamelCase) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model A_ : Tuple = {} for epoch in range(lowerCamelCase , lowerCamelCase): model.train() for step, batch in enumerate(lowerCamelCase): A_ : Dict = model(**lowerCamelCase) A_ : str = outputs.loss A_ : List[str] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 A_ : List[str] = F'epoch_{epoch}' A_ : Optional[Any] = os.path.join(args.output_dir , lowerCamelCase) accelerator.save_state(lowerCamelCase) A_ : List[Any] = evaluation_loop(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) A_ : str = accuracy A_ : List[Any] = lr_scheduler.get_lr()[0] A_ : int = optimizer.param_groups[0]["""lr"""] A_ : Any = epoch A_ : Optional[Any] = overall_step accelerator.print(F'epoch {epoch}:' , lowerCamelCase) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , F'state_{epoch}.json') , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase) def lowerCamelCase ( ): A_ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""") parser.add_argument( """--model_name_or_path""" , type=lowerCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase , ) parser.add_argument( """--output_dir""" , type=lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=lowerCamelCase , default=lowerCamelCase , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=lowerCamelCase , default=lowerCamelCase , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=lowerCamelCase , default=2 , help="""Number of train epochs.""" , ) A_ : Tuple = parser.parse_args() A_ : Union[str, Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase , lowerCamelCase) if __name__ == "__main__": main()
27
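# A minimal sketch of the per-epoch checkpoint bookkeeping used above: metrics are
# dumped to state_{epoch}.json and the resume epoch is parsed back out of the
# "epoch_<n>" folder name. Paths and values are illustrative.
import json
import os
import tempfile


def save_epoch_state(output_dir, epoch, accuracy, lr):
    state = {"accuracy": accuracy, "lr": lr, "epoch": epoch}
    with open(os.path.join(output_dir, f"state_{epoch}.json"), "w") as f:
        json.dump(state, f)


def resume_epoch(checkpoint_folder):
    # Checkpoint folders are named "epoch_<n>"; training resumes at n + 1.
    return int(checkpoint_folder.split("epoch_")[1]) + 1


with tempfile.TemporaryDirectory() as tmp:
    save_epoch_state(tmp, 3, 0.85, 2e-5)
    print(resume_epoch("epoch_3"))  # 4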
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __magic_name__ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""input_features""", """is_longer"""] def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,): '''simple docstring''' super().__init__( feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,) A_ : Tuple = top_db A_ : Tuple = truncation A_ : Optional[Any] = padding A_ : Optional[int] = fft_window_size A_ : Dict = (fft_window_size >> 1) + 1 A_ : Any = hop_length A_ : List[Any] = max_length_s A_ : Tuple = max_length_s * sampling_rate A_ : Tuple = sampling_rate A_ : Optional[int] = frequency_min A_ : Tuple = frequency_max A_ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,) A_ : Dict = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,) def _a ( self : int ): '''simple docstring''' A_ : int = copy.deepcopy(self.__dict__ ) A_ : Tuple = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ): '''simple docstring''' A_ : List[str] = spectrogram( _a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,) return log_mel_spectrogram.T def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A_ : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A_ : int = [0] # randomly choose index for each part A_ : List[str] = np.random.choice(ranges[0] ) A_ : int = np.random.choice(ranges[1] ) A_ : Optional[int] = np.random.choice(ranges[2] ) A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :] A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :] A_ : Dict = mel[idx_back : idx_back + chunk_frames, :] A_ : Optional[int] = torch.tensor(mel[None, None, :] ) A_ : Dict = torch.nn.functional.interpolate( _a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a ) A_ : str = mel_shrink[0][0].numpy() A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": A_ : Dict = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A_ : Tuple = 
len(_a ) - max_length A_ : Optional[int] = np.random.randint(0 ,overflow + 1 ) A_ : List[Any] = waveform[idx : idx + max_length] A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A_ : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 ) A_ : str = False else: A_ : str = self._random_mel_fusion(_a ,_a ,_a ) A_ : Optional[Any] = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented' ) else: A_ : Optional[int] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A_ : int = int(max_length / len(_a ) ) A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A_ : List[str] = int(max_length / len(_a ) ) A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) ) A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,): '''simple docstring''' A_ : List[str] = truncation if truncation is not None else self.truncation A_ : List[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) A_ : int = is_batched_numpy or ( isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_a ,np.ndarray ): A_ : str = np.asarray(_a ,dtype=np.floataa ) elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Any = [np.asarray(_a )] # convert to mel spectrogram, truncate and pad if needed. 
A_ : str = [ self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a ) for waveform in raw_speech ] A_ : int = [] A_ : Any = [] for mel, longer in padded_inputs: input_mel.append(_a ) is_longer.append(_a ) if truncation == "fusion" and sum(_a ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A_ : List[Any] = np.random.randint(0 ,len(_a ) ) A_ : List[str] = True if isinstance(input_mel[0] ,_a ): A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A_ : List[str] = [[longer] for longer in is_longer] A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} A_ : int = BatchFeature(_a ) if return_tensors is not None: A_ : int = input_features.convert_to_tensors(_a ) return input_features
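# A minimal sketch of the "repeatpad" branch implemented above: tile a short
# clip until it covers max_length samples, then zero-pad the remainder. The
# function name and the 1-D float waveform are illustrative assumptions, not
# part of the original extractor.
import numpy as np


def repeat_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    if waveform.shape[0] >= max_length:
        return waveform[:max_length]
    n_repeat = max_length // len(waveform)
    tiled = np.tile(waveform, n_repeat)
    # pad the tail with zeros so every example has exactly max_length samples
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)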
27
1
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = IFInpaintingPipeline a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS a_ = PipelineTesterMixin.required_optional_params - {"""latents"""} def _a ( self : int ): '''simple docstring''' return self._get_dummy_components() def _a ( self : List[str] ,_a : Tuple ,_a : Any=0 ): '''simple docstring''' if str(_a ).startswith("""mps""" ): A_ : Optional[Any] = torch.manual_seed(_a ) else: A_ : Tuple = torch.Generator(device=_a ).manual_seed(_a ) A_ : Dict = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_a ) ).to(_a ) A_ : Optional[int] = floats_tensor((1, 3, 32, 32) ,rng=random.Random(_a ) ).to(_a ) A_ : str = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,) def _a ( self : List[str] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def _a ( self : Optional[Any] ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" ,reason="""float16 requires CUDA""" ) def _a ( self : List[str] ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def _a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _a ( self : Any ): '''simple docstring''' self._test_save_load_local() def _a ( self : Dict ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 ,)
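# The seeding helper above special-cases MPS; a standalone sketch of the same
# idea (the helper name, device string, and default seed are illustrative):
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # torch.Generator(device="mps") is not reliably supported on older torch
    # releases, so tests fall back to seeding the global CPU generator there.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)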
27
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : str = batch_size A_ : int = seq_length A_ : Union[str, Any] = is_training A_ : Optional[Any] = use_token_type_ids A_ : int = use_labels A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : int = intermediate_size A_ : Tuple = hidden_act A_ : int = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : int = initializer_range A_ : Optional[Any] = num_labels A_ : str = num_choices A_ : Optional[Any] = scope A_ : List[Any] = self.vocab_size - 1 def _a ( self : Any ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : List[Any] = None if self.use_token_type_ids: A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : int = None A_ : str = None A_ : Union[str, Any] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Any = ids_tensor([self.batch_size] ,self.num_choices ) A_ : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = OpenAIGPTModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a ) A_ : str = model(_a ,token_type_ids=_a ) A_ : Dict = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ): '''simple docstring''' A_ : str = OpenAIGPTLMHeadModel(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ): '''simple docstring''' A_ : Any = OpenAIGPTDoubleHeadsModel(_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ): '''simple docstring''' A_ : List[str] = self.num_labels A_ : int = OpenAIGPTForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = config_and_inputs A_ : int = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) a_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly a_ = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ): '''simple docstring''' A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,) A_ : Any = inputs_dict["""labels"""] A_ : Any = inputs_dict["""labels"""] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,) A_ : int = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=_a ) return inputs_dict def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Tuple = OpenAIGPTModelTester(self ) A_ : Optional[int] = ConfigTester(self ,config_class=OpenAIGPTConfig ,n_embd=37 ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_a ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a ) @slow def _a ( self : List[Any] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(model_name ) self.assertIsNotNone(_a ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : List[str] ): '''simple docstring''' A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_a ) A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=torch_device ) # the president is A_ : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : int = model.generate(_a ,do_sample=False ) self.assertListEqual(output_ids[0].tolist() ,_a )
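# Hedged sketch of the slow integration check above: greedy decoding from the
# "openai-gpt" checkpoint (requires network access to download the weights).
import torch
from transformers import OpenAIGPTLMHeadModel

model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
prompt = torch.tensor([[481, 4735, 544]], dtype=torch.long)  # "the president is"
output_ids = model.generate(prompt, do_sample=False)  # do_sample=False means greedy
print(output_ids[0].tolist())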
27
1
'''simple docstring''' from ... import PretrainedConfig __magic_name__ = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP a_ = """nezha""" def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,): '''simple docstring''' super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ) A_ : Tuple = vocab_size A_ : int = hidden_size A_ : Any = num_hidden_layers A_ : List[Any] = num_attention_heads A_ : Tuple = hidden_act A_ : List[Any] = intermediate_size A_ : List[str] = hidden_dropout_prob A_ : Tuple = attention_probs_dropout_prob A_ : Dict = max_position_embeddings A_ : Optional[Any] = max_relative_position A_ : List[Any] = type_vocab_size A_ : int = initializer_range A_ : Tuple = layer_norm_eps A_ : Dict = classifier_dropout A_ : int = use_cache
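# Hedged usage sketch for the config class above. The class name is obfuscated
# to __lowerCAmelCase here; upstream it follows the standard PretrainedConfig
# API, so a save/load round-trip works as below (NezhaConfig is assumed to be
# the registered name, matching model_type "nezha").
from transformers import NezhaConfig

config = NezhaConfig(hidden_size=768, num_hidden_layers=12, max_relative_position=64)
config.save_pretrained("./nezha-config")
reloaded = NezhaConfig.from_pretrained("./nezha-config")
assert reloaded.max_relative_position == 64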
27
'''simple docstring''' import baseaa def lowerCamelCase ( string : str): return baseaa.aaaencode(string.encode("""utf-8""")) def lowerCamelCase ( lowerCamelCase : bytes): return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""") if __name__ == "__main__": import doctest doctest.testmod()
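# The digit-mangled names above ("baseaa", "aaaencode") appear to stand in for
# the standard library's Ascii85 helpers; a runnable equivalent, with
# illustrative function names:
import base64


def ascii85_encode(text: str) -> bytes:
    return base64.a85encode(text.encode("utf-8"))


def ascii85_decode(blob: bytes) -> str:
    return base64.a85decode(blob).decode("utf-8")


assert ascii85_decode(ascii85_encode("Hello World!")) == "Hello World!"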
27
1
'''simple docstring'''

from __future__ import annotations


def peak(lst: list[int]) -> int:
    # Divide and conquer: a unimodal list's peak is found by recursing toward
    # the rising side, halving the search range on every call.
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
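# Usage examples for peak() above (the input lists are illustrative): each
# list rises to a single maximum and then falls, which is the precondition
# the divide-and-conquer search relies on.
assert peak([1, 3, 5, 9, 7, 2]) == 9
assert peak([1, 10, 9, 8, 7, 6, 5, 4]) == 10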
27
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowerCamelCase ( lowerCamelCase : Optional[Any]): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def lowerCamelCase ( lowerCamelCase : str): # word like '180' or '身高' or '神' for char in word: A_ : Optional[Any] = ord(lowerCamelCase) if not _is_chinese_char(lowerCamelCase): return 0 return 1 def lowerCamelCase ( lowerCamelCase : List[str]): A_ : Any = set() for token in tokens: A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase) if chinese_word: word_set.add(lowerCamelCase) A_ : Any = list(lowerCamelCase) return word_list def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()): if not chinese_word_set: return bert_tokens A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set]) A_ : str = bert_tokens A_ , A_ : Any = 0, len(lowerCamelCase) while start < end: A_ : Tuple = True if is_chinese(bert_word[start]): A_ : List[str] = min(end - start , lowerCamelCase) for i in range(lowerCamelCase , 1 , -1): A_ : Tuple = """""".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1 , start + i): A_ : Dict = """##""" + bert_word[j] A_ : str = start + i A_ : Dict = False break if single_word: start += 1 return bert_word def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer): A_ : Union[str, Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws A_ : int = [get_chinese_word(lowerCamelCase) for r in res] ltp_res.extend(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : List[Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512) bert_res.extend(res["""input_ids"""]) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Union[str, Any] = [] for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase): A_ : List[Any] = [] for id in input_ids: A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase) input_tokens.append(lowerCamelCase) A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase) A_ : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCamelCase): if token[:2] == "##": A_ : Optional[Any] = token[2:] # save chinese tokens' pos if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)): ref_id.append(lowerCamelCase) ref_ids.append(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) return ref_ids def lowerCamelCase ( lowerCamelCase : Tuple): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""") as f: A_ : Optional[int] = f.readlines() A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device A_ : Dict = BertTokenizer.from_pretrained(args.bert) A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase) with open(args.save_path , """w""" , encoding="""utf-8""") as f: A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids] f.writelines(lowerCamelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) __magic_name__ = parser.parse_args() main(args)
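# Toy illustration of the ref file written above: one JSON list per training
# line, holding the positions of "##"-prefixed subwords that continue a
# Chinese whole word (the values below are hypothetical).
import json

ref_ids = [[3, 4], [], [7]]
with open("ref.txt", "w", encoding="utf-8") as f:
    f.writelines(json.dumps(ref) + "\n" for ref in ref_ids)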
27
1
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="""session""") def lowerCamelCase ( ): A_ : int = 10 A_ : Dict = datasets.Features( { """tokens""": datasets.Sequence(datasets.Value("""string""")), """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""])), """answers""": datasets.Sequence( { """text""": datasets.Value("""string"""), """answer_start""": datasets.Value("""int32"""), }), """id""": datasets.Value("""int64"""), }) A_ : List[Any] = datasets.Dataset.from_dict( { """tokens""": [["""foo"""] * 5] * n, """labels""": [[1] * 5] * n, """answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10, """id""": list(range(lowerCamelCase)), } , features=lowerCamelCase , ) return dataset @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Tuple): A_ : List[Any] = str(tmp_path_factory.mktemp("""data""") / """file.arrow""") dataset.map(cache_file_name=lowerCamelCase) return filename # FILE_CONTENT + files __magic_name__ = '\\n Text data.\n Second line of data.' @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : int): A_ : Dict = tmp_path_factory.mktemp("""data""") / """file.txt""" A_ : Any = FILE_CONTENT with open(lowerCamelCase , """w""") as f: f.write(lowerCamelCase) return filename @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : int): import bza A_ : Union[str, Any] = tmp_path_factory.mktemp("""data""") / """file.txt.bz2""" A_ : int = bytes(lowerCamelCase , """utf-8""") with bza.open(lowerCamelCase , """wb""") as f: f.write(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Union[str, Any]): import gzip A_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""") / """file.txt.gz""") A_ : Optional[Any] = bytes(lowerCamelCase , """utf-8""") with gzip.open(lowerCamelCase , """wb""") as f: f.write(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[Any]): if datasets.config.LZ4_AVAILABLE: import lza.frame A_ : Dict = tmp_path_factory.mktemp("""data""") / """file.txt.lz4""" A_ : Optional[int] = bytes(lowerCamelCase , """utf-8""") with lza.frame.open(lowerCamelCase , """wb""") as f: f.write(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any]): if datasets.config.PY7ZR_AVAILABLE: import pyazr A_ : Union[str, Any] = tmp_path_factory.mktemp("""data""") / """file.txt.7z""" with pyazr.SevenZipFile(lowerCamelCase , """w""") as archive: archive.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : str): import tarfile A_ : List[str] = tmp_path_factory.mktemp("""data""") / """file.txt.tar""" with tarfile.TarFile(lowerCamelCase , """w""") as f: f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : int): import lzma A_ : List[Any] = tmp_path_factory.mktemp("""data""") / """file.txt.xz""" A_ : Any = bytes(lowerCamelCase , """utf-8""") with lzma.open(lowerCamelCase , """wb""") as f: f.write(lowerCamelCase) return path 
@pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : List[str]): import zipfile A_ : str = tmp_path_factory.mktemp("""data""") / """file.txt.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[int]): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd A_ : str = tmp_path_factory.mktemp("""data""") / """file.txt.zst""" A_ : Tuple = bytes(lowerCamelCase , """utf-8""") with zstd.open(lowerCamelCase , """wb""") as f: f.write(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : int): A_ : List[str] = tmp_path_factory.mktemp("""data""") / """file.xml""" A_ : Optional[int] = textwrap.dedent( """\ <?xml version=\"1.0\" encoding=\"UTF-8\" ?> <tmx version=\"1.4\"> <header segtype=\"sentence\" srclang=\"ca\" /> <body> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv> <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""") with open(lowerCamelCase , """w""") as f: f.write(lowerCamelCase) return filename __magic_name__ = [ {'col_1': '0', 'col_2': 0, 'col_3': 0.0}, {'col_1': '1', 'col_2': 1, 'col_3': 1.0}, {'col_1': '2', 'col_2': 2, 'col_3': 2.0}, {'col_1': '3', 'col_2': 3, 'col_3': 3.0}, ] __magic_name__ = [ {'col_1': '4', 'col_2': 4, 'col_3': 4.0}, {'col_1': '5', 'col_2': 5, 'col_3': 5.0}, ] __magic_name__ = { 'col_1': ['0', '1', '2', '3'], 'col_2': [0, 1, 2, 3], 'col_3': [0.0, 1.0, 2.0, 3.0], } __magic_name__ = [ {'col_3': 0.0, 'col_1': '0', 'col_2': 0}, {'col_3': 1.0, 'col_1': '1', 'col_2': 1}, ] __magic_name__ = [ {'col_1': 's0', 'col_2': 0, 'col_3': 0.0}, {'col_1': 's1', 'col_2': 1, 'col_3': 1.0}, {'col_1': 's2', 'col_2': 2, 'col_3': 2.0}, {'col_1': 's3', 'col_2': 3, 'col_3': 3.0}, ] @pytest.fixture(scope="""session""") def lowerCamelCase ( ): return DATA_DICT_OF_LISTS @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : List[Any]): A_ : List[Any] = datasets.Dataset.from_dict(lowerCamelCase) A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.arrow""") dataset.map(cache_file_name=lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[int]): A_ : int = str(tmp_path_factory.mktemp("""data""") / """dataset.sqlite""") with contextlib.closing(sqlitea.connect(lowerCamelCase)) as con: A_ : Optional[int] = con.cursor() cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""") for item in DATA: cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values())) con.commit() return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[Any]): A_ : Any = str(tmp_path_factory.mktemp("""data""") / """dataset.csv""") with open(lowerCamelCase , """w""" , newline="""""") as f: A_ : Any = csv.DictWriter(lowerCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""]) writer.writeheader() for item in 
DATA: writer.writerow(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[Any]): A_ : str = str(tmp_path_factory.mktemp("""data""") / """dataset2.csv""") with open(lowerCamelCase , """w""" , newline="""""") as f: A_ : str = csv.DictWriter(lowerCamelCase , fieldnames=["""col_1""", """col_2""", """col_3"""]) writer.writeheader() for item in DATA: writer.writerow(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : List[Any]): import bza A_ : Optional[int] = tmp_path_factory.mktemp("""data""") / """dataset.csv.bz2""" with open(lowerCamelCase , """rb""") as f: A_ : Optional[int] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowerCamelCase , """wb""") as f: f.write(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : Dict): A_ : Optional[int] = tmp_path_factory.mktemp("""data""") / """dataset.csv.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict): A_ : Any = tmp_path_factory.mktemp("""data""") / """dataset.csv.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV"""))) f.write(lowerCamelCase , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV"""))) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any]): A_ : Any = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.csv.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase))) f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase))) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : str): A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.parquet""") A_ : str = pa.schema( { """col_1""": pa.string(), """col_2""": pa.intaa(), """col_3""": pa.floataa(), }) with open(lowerCamelCase , """wb""") as f: A_ : List[Any] = pq.ParquetWriter(lowerCamelCase , schema=lowerCamelCase) A_ : Optional[int] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase))] for k in DATA[0]} , schema=lowerCamelCase) writer.write_table(lowerCamelCase) writer.close() return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Union[str, Any]): A_ : Optional[int] = str(tmp_path_factory.mktemp("""data""") / """dataset.json""") A_ : List[str] = {"""data""": DATA} with open(lowerCamelCase , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : List[Any]): A_ : Any = str(tmp_path_factory.mktemp("""data""") / """dataset.json""") A_ : Dict = {"""data""": DATA_DICT_OF_LISTS} with open(lowerCamelCase , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[int]): A_ : List[Any] = 
str(tmp_path_factory.mktemp("""data""") / """dataset.jsonl""") with open(lowerCamelCase , """w""") as f: for item in DATA: f.write(json.dumps(lowerCamelCase) + """\n""") return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : List[str]): A_ : Dict = str(tmp_path_factory.mktemp("""data""") / """dataset2.jsonl""") with open(lowerCamelCase , """w""") as f: for item in DATA: f.write(json.dumps(lowerCamelCase) + """\n""") return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : List[Any]): A_ : List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset_312.jsonl""") with open(lowerCamelCase , """w""") as f: for item in DATA_312: f.write(json.dumps(lowerCamelCase) + """\n""") return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : int): A_ : Dict = str(tmp_path_factory.mktemp("""data""") / """dataset-str.jsonl""") with open(lowerCamelCase , """w""") as f: for item in DATA_STR: f.write(json.dumps(lowerCamelCase) + """\n""") return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : List[Any]): import gzip A_ : List[str] = str(tmp_path_factory.mktemp("""data""") / """dataset.txt.gz""") with open(lowerCamelCase , """rb""") as orig_file: with gzip.open(lowerCamelCase , """wb""") as zipped_file: zipped_file.writelines(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : List[Any]): import gzip A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.jsonl.gz""") with open(lowerCamelCase , """rb""") as orig_file: with gzip.open(lowerCamelCase , """wb""") as zipped_file: zipped_file.writelines(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : List[Any]): A_ : Any = tmp_path_factory.mktemp("""data""") / """dataset.jsonl.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any]): A_ : Tuple = tmp_path_factory.mktemp("""data""") / """dataset_nested.jsonl.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase))) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : str): A_ : Any = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.jsonl.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase))) f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase))) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]): A_ : Any = tmp_path_factory.mktemp("""data""") / """dataset.jsonl.tar""" with tarfile.TarFile(lowerCamelCase , """w""") as f: f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) return path @pytest.fixture(scope="""session""") def lowerCamelCase 
( lowerCamelCase : Dict , lowerCamelCase : Tuple , lowerCamelCase : List[str] , lowerCamelCase : Any): A_ : str = tmp_path_factory.mktemp("""data""") / """dataset_nested.jsonl.tar""" with tarfile.TarFile(lowerCamelCase , """w""") as f: f.add(lowerCamelCase , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase))) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[Any]): A_ : Optional[Any] = ["""0""", """1""", """2""", """3"""] A_ : List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset.txt""") with open(lowerCamelCase , """w""") as f: for item in data: f.write(item + """\n""") return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : List[Any]): A_ : Optional[int] = ["""0""", """1""", """2""", """3"""] A_ : List[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset2.txt""") with open(lowerCamelCase , """w""") as f: for item in data: f.write(item + """\n""") return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : str): A_ : Dict = ["""0""", """1""", """2""", """3"""] A_ : Optional[int] = tmp_path_factory.mktemp("""data""") / """dataset.abc""" with open(lowerCamelCase , """w""") as f: for item in data: f.write(item + """\n""") return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Any , lowerCamelCase : int): A_ : List[Any] = tmp_path_factory.mktemp("""data""") / """dataset.text.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase)) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Tuple): A_ : str = tmp_path_factory.mktemp("""data""") / """dataset_with_dir.text.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase))) f.write(lowerCamelCase , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase))) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : List[Any]): A_ : int = tmp_path_factory.mktemp("""data""") / """dataset.ext.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , arcname=os.path.basename("""unsupported.ext""")) f.write(lowerCamelCase , arcname=os.path.basename("""unsupported_2.ext""")) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Tuple): A_ : str = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""]) A_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""") / """dataset_with_unicode_new_lines.txt""") with open(lowerCamelCase , """w""" , encoding="""utf-8""") as f: f.write(lowerCamelCase) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( ): return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""") @pytest.fixture(scope="""session""") def lowerCamelCase ( ): return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""") @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[int]): A_ : Any = tmp_path_factory.mktemp("""data""") / """dataset.img.zip""" with zipfile.ZipFile(lowerCamelCase , """w""") as f: f.write(lowerCamelCase , 
arcname=os.path.basename(lowerCamelCase)) f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase).replace(""".jpg""" , """2.jpg""")) return path @pytest.fixture(scope="""session""") def lowerCamelCase ( lowerCamelCase : List[str]): A_ : str = tmp_path_factory.mktemp("""data_dir""") (data_dir / "subdir").mkdir() with open(data_dir / """subdir""" / """train.txt""" , """w""") as f: f.write("""foo\n""" * 10) with open(data_dir / """subdir""" / """test.txt""" , """w""") as f: f.write("""bar\n""" * 10) # hidden file with open(data_dir / """subdir""" / """.test.txt""" , """w""") as f: f.write("""bar\n""" * 10) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / """.subdir""" / """train.txt""" , """w""") as f: f.write("""foo\n""" * 10) with open(data_dir / """.subdir""" / """test.txt""" , """w""") as f: f.write("""bar\n""" * 10) return data_dir
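# The fixtures above all follow one template; a self-contained sketch of it
# (the fixture name and file contents are illustrative):
import pytest


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    # tmp_path_factory is pytest's built-in, session-scoped temp-dir factory,
    # so the file is created once and shared by every test in the session.
    path = tmp_path_factory.mktemp("data") / "file.txt"
    path.write_text("Text data.\nSecond line of data.")
    return str(path)


def test_text_file(text_file):
    with open(text_file) as f:
        assert f.readline().startswith("Text")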
27
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """ViltImageProcessor""" a_ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ): '''simple docstring''' A_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a ,_a ) A_ : Optional[Any] = self.image_processor def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,): '''simple docstring''' A_ : int = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) # add pixel_values + pixel_mask A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a ) encoding.update(_a ) return encoding def _a ( self : List[Any] ,*_a : Any ,**_a : Any ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : int ,*_a : int ,**_a : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = self.tokenizer.model_input_names A_ : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a ( self : str ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,) return self.image_processor_class @property def _a ( self : int ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,) return self.image_processor
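# Hedged usage sketch for the processor above: it pairs a ViLT image processor
# with a BERT tokenizer, so a single call yields both text and image tensors.
# The checkpoint name below is an assumption.
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-mlm")
image = Image.new("RGB", (384, 384))
encoding = processor(image, "a photo of a cat", return_tensors="pt")
print(sorted(encoding.keys()))  # input_ids, attention_mask, pixel_values, pixel_mask, ...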
27
1
'''simple docstring'''

import argparse
import hashlib  # hashlib is only used inside the Test class
import struct


class SHA1Hash:
    '''simple docstring'''

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # 32-bit left rotation, used in every round of the compression loop
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
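# Quick cross-check of the hand-rolled class above against the standard
# library on a few short messages:
import hashlib

for msg in [b"", b"abc", b"Test String"]:
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324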
27
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=DummyObject ): '''simple docstring''' _backends = ["""torch""", """torchsde"""] def __init__( self ,*args ,**kwargs ): '''simple docstring''' requires_backends(self ,["""torch""", """torchsde"""] ) @classmethod def from_config( cls ,*args ,**kwargs ): '''simple docstring''' requires_backends(cls ,["""torch""", """torchsde"""] ) @classmethod def from_pretrained( cls ,*args ,**kwargs ): '''simple docstring''' requires_backends(cls ,["""torch""", """torchsde"""] )
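# A minimal sketch of the dummy-object pattern used above: the metaclass
# defers the ImportError until the class is actually touched, so importing
# the package stays cheap when optional backends are missing. The class names
# below are hypothetical stand-ins.
class _DummyObject(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends: {cls._backends}")


class NeedsTorchSde(metaclass=_DummyObject):
    _backends = ["torch", "torchsde"]


try:
    NeedsTorchSde.from_pretrained
except ImportError as err:
    print(err)  # NeedsTorchSde requires the backends: ['torch', 'torchsde']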
27
1
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"): A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {} A_ : Optional[int] = padding_side return tokenizer( [line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , ) def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ): A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,): '''simple docstring''' super().__init__() A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" ) A_ : Any = Path(_a ).joinpath(type_path + """.target""" ) A_ : Dict = self.get_char_lens(self.src_file ) A_ : Optional[int] = max_source_length A_ : List[str] = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' A_ : List[Any] = tokenizer A_ : Optional[Any] = prefix if n_obs is not None: A_ : Any = self.src_lens[:n_obs] A_ : Optional[int] = src_lang A_ : Tuple = tgt_lang def __len__( self : Tuple ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : List[str] ,_a : Tuple ): '''simple docstring''' A_ : int = index + 1 # linecache starts at 1 A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" ) A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_a ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right A_ : List[str] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer ) A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" ) A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" ) A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze() A_ : Dict = target_inputs["""input_ids"""].squeeze() A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _a ( _a : int ): '''simple docstring''' return [len(_a ) for x in Path(_a ).open().readlines()] def _a ( self : 
Optional[int] ,_a : Dict ): '''simple docstring''' A_ : str = torch.stack([x["""input_ids"""] for x in batch] ) A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] ) A_ : Union[str, Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : str = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : List[str] = trim_batch(_a ,_a ) A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a ) A_ : List[str] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __magic_name__ = getLogger(__name__) def lowerCamelCase ( lowerCamelCase : List[List]): return list(itertools.chain.from_iterable(lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : str): A_ : Union[str, Any] = get_git_info() save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json""")) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]): with open(lowerCamelCase , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase) def lowerCamelCase ( lowerCamelCase : Any): with open(lowerCamelCase) as f: return json.load(lowerCamelCase) def lowerCamelCase ( ): A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase) A_ : Union[str, Any] = { """repo_id""": str(lowerCamelCase), """repo_sha""": str(repo.head.object.hexsha), """repo_branch""": str(repo.active_branch), """hostname""": str(socket.gethostname()), } return repo_infos def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable): return list(map(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]): with open(lowerCamelCase , """wb""") as f: return pickle.dump(lowerCamelCase , lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str]): def remove_articles(lowerCamelCase : Any): return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase) def white_space_fix(lowerCamelCase : List[Any]): return " ".join(text.split()) def remove_punc(lowerCamelCase : Union[str, Any]): A_ : Optional[int] = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(lowerCamelCase : List[str]): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase)))) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int): A_ : Tuple = normalize_answer(lowerCamelCase).split() A_ : Dict = normalize_answer(lowerCamelCase).split() A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase) A_ : Any = sum(common.values()) if num_same == 0: return 0 A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any): return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]): assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Any = 0 for hypo, pred in zip(lowerCamelCase , lowerCamelCase): em += exact_match_score(lowerCamelCase , lowerCamelCase) if len(lowerCamelCase) > 0: em /= len(lowerCamelCase) return {"em": em} def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return 
model_prefix.startswith("""rag""") def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]): A_ : Optional[Any] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead A_ : Tuple = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase): if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) continue A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p] setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) return hparams, config
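# De-obfuscated sketch of the token-overlap F1 used by the metric helpers
# above (SQuAD-style; the normalization step is reduced to lowercasing here):
from collections import Counter


def token_f1(prediction: str, reference: str) -> float:
    pred_tokens = prediction.lower().split()
    ref_tokens = reference.lower().split()
    common = Counter(pred_tokens) & Counter(ref_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(ref_tokens)
    return 2 * precision * recall / (precision + recall)


assert abs(token_f1("the cat sat", "a cat sat") - 2 / 3) < 1e-9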
27
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, TaTokenizer

def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
    A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
    A_ : Optional[int] = padding_side
    return tokenizer(
        [line] ,
        max_length=lowerCamelCase ,
        padding="""max_length""" if pad_to_max_length else None ,
        truncation=lowerCamelCase ,
        return_tensors=lowerCamelCase ,
        add_special_tokens=lowerCamelCase ,
        **lowerCamelCase ,
    )

def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
    A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
        '''simple docstring'''
        super().__init__()
        A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
        A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
        A_ : Dict = self.get_char_lens(self.src_file )
        A_ : Optional[int] = max_source_length
        A_ : List[str] = max_target_length
        assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
        A_ : List[Any] = tokenizer
        A_ : Optional[Any] = prefix
        if n_obs is not None:
            A_ : Any = self.src_lens[:n_obs]
        A_ : Optional[int] = src_lang
        A_ : Tuple = tgt_lang

    def __len__( self : Tuple ):
        '''simple docstring'''
        return len(self.src_lens )

    def __getitem__( self : List[str] ,_a : Tuple ):
        '''simple docstring'''
        A_ : int = index + 1  # linecache starts at 1
        A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
        A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
        assert source_line, f'empty source line for index {index}'
        assert tgt_line, f'empty tgt line for index {index}'

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,_a ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        A_ : List[str] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
        )
        A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer

        A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
        A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )

        A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
        A_ : Dict = target_inputs["""input_ids"""].squeeze()
        A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def _a ( _a : int ):
        '''simple docstring'''
        return [len(_a ) for x in Path(_a ).open().readlines()]

    def _a ( self : Optional[int] ,_a : Dict ):
        '''simple docstring'''
        A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
        A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
        A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        A_ : Union[str, Any] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,_a )
            else self.tokenizer.pad_token_id
        )
        A_ : str = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,_a )
            else self.tokenizer.pad_token_id
        )
        A_ : List[str] = trim_batch(_a ,_a )
        A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
        A_ : List[str] = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch

__magic_name__ = getLogger(__name__)

def lowerCamelCase ( lowerCamelCase : List[List]):
    return list(itertools.chain.from_iterable(lowerCamelCase))

def lowerCamelCase ( lowerCamelCase : str):
    A_ : Union[str, Any] = get_git_info()
    save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))

def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
    with open(lowerCamelCase , """w""") as f:
        json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)

def lowerCamelCase ( lowerCamelCase : Any):
    with open(lowerCamelCase) as f:
        return json.load(lowerCamelCase)

def lowerCamelCase ( ):
    A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
    A_ : Union[str, Any] = {
        """repo_id""": str(lowerCamelCase),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
        """hostname""": str(socket.gethostname()),
    }
    return repo_infos

def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
    return list(map(lowerCamelCase , lowerCamelCase))

def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
    with open(lowerCamelCase , """wb""") as f:
        return pickle.dump(lowerCamelCase , lowerCamelCase)

def lowerCamelCase ( lowerCamelCase : List[str]):
    def remove_articles(lowerCamelCase : Any):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)

    def white_space_fix(lowerCamelCase : List[Any]):
        return " ".join(text.split())

    def remove_punc(lowerCamelCase : Union[str, Any]):
        A_ : Optional[int] = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(lowerCamelCase : List[str]):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))

def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
    A_ : Tuple = normalize_answer(lowerCamelCase).split()
    A_ : Dict = normalize_answer(lowerCamelCase).split()
    A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
    A_ : Any = sum(common.values())
    if num_same == 0:
        return 0
    A_ : Any = 1.0 * num_same / len(lowerCamelCase)
    A_ : Any = 1.0 * num_same / len(lowerCamelCase)
    A_ : Any = (2 * precision * recall) / (precision + recall)
    return fa

def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
    return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)

def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
    assert len(lowerCamelCase) == len(lowerCamelCase)
    A_ : Any = 0
    for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
        em += exact_match_score(lowerCamelCase , lowerCamelCase)
    if len(lowerCamelCase) > 0:
        em /= len(lowerCamelCase)
    return {"em": em}

def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
    return model_prefix.startswith("""rag""")

def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
    A_ : Optional[Any] = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    A_ : Tuple = """dropout_rate"""
    for p in extra_params:
        if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
            if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
                logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
                delattr(lowerCamelCase , lowerCamelCase)
                continue
            A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
            setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
            delattr(lowerCamelCase , lowerCamelCase)
    return hparams, config
27
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

__magic_name__ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['BartphoTokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

__magic_name__ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['NllbTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['NllbTokenizerFast']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
1
'''simple docstring'''
import os
import string
import sys

__magic_name__ = 1 << 8
__magic_name__ = {
    'tab': ord('\t'),
    'newline': ord('\r'),
    'esc': 27,
    'up': 65 + ARROW_KEY_FLAG,
    'down': 66 + ARROW_KEY_FLAG,
    'right': 67 + ARROW_KEY_FLAG,
    'left': 68 + ARROW_KEY_FLAG,
    'mod_int': 91,
    'undefined': sys.maxsize,
    'interrupt': 3,
    'insert': 50,
    'delete': 51,
    'pg_up': 53,
    'pg_down': 54,
}
__magic_name__ = KEYMAP['up']
__magic_name__ = KEYMAP['left']

if sys.platform == "win32":
    __magic_name__ = []
    __magic_name__ = {
        b'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
        b'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
        b'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
        b'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
        b'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
    }

for i in range(10):
    __magic_name__ = ord(str(i))

def lowerCamelCase ( ):
    if os.name == "nt":
        import msvcrt

        A_ : List[Any] = """mbcs"""
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(lowerCamelCase) == 0:
            # Read the keystroke
            A_ : Optional[int] = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                A_ : Optional[int] = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    A_ : Optional[int] = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""]))
                    WIN_CH_BUFFER.append(lowerCamelCase)
                    if ord(lowerCamelCase) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    A_ : List[Any] = chr(KEYMAP["""esc"""])
                except KeyError:
                    A_ : str = cha[1]
            else:
                A_ : List[Any] = ch.decode(lowerCamelCase)
        else:
            A_ : List[Any] = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        A_ : Optional[int] = sys.stdin.fileno()
        A_ : Union[str, Any] = termios.tcgetattr(lowerCamelCase)
        try:
            tty.setraw(lowerCamelCase)
            A_ : Optional[Any] = sys.stdin.read(1)
        finally:
            termios.tcsetattr(lowerCamelCase , termios.TCSADRAIN , lowerCamelCase)
    return ch

def lowerCamelCase ( ):
    A_ : Union[str, Any] = get_raw_chars()
    if ord(lowerCamelCase) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(lowerCamelCase) == KEYMAP["esc"]:
        A_ : str = get_raw_chars()
        if ord(lowerCamelCase) == KEYMAP["mod_int"]:
            A_ : List[str] = get_raw_chars()
            if ord(lowerCamelCase) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowerCamelCase) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(lowerCamelCase) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
27
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = KandinskyVaaControlnetPipeline a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a_ = False @property def _a ( self : Any ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return self.time_input_dim @property def _a ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def _a ( self : Optional[Any] ): '''simple docstring''' return 100 @property def _a ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) A_ : List[Any] = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } A_ : Tuple = UNetaDConditionModel(**_a ) return model @property def _a ( self : List[str] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _a ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) A_ : int = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : List[str] ): '''simple docstring''' A_ : Optional[Any] = self.dummy_unet A_ : int = self.dummy_movq A_ : Tuple = DDIMScheduler( num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,) A_ : int = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ): '''simple docstring''' A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) 
,rng=random.Random(_a ) ).to(_a ) A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( _a ) # create hint A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith("""mps""" ): A_ : Optional[Any] = torch.manual_seed(_a ) else: A_ : str = torch.Generator(device=_a ).manual_seed(_a ) A_ : List[Any] = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def _a ( self : Dict ): '''simple docstring''' A_ : List[Any] = """cpu""" A_ : List[str] = self.get_dummy_components() A_ : Tuple = self.pipeline_class(**_a ) A_ : Dict = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) ) A_ : Tuple = output.images A_ : Optional[Any] = pipe( **self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0] A_ : Tuple = image[0, -3:, -3:, -1] A_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : List[Any] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Any ): '''simple docstring''' A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) A_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0 A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(_a ) A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) A_ : Union[str, Any] = pipeline.to(_a ) pipeline.set_progress_bar_config(disable=_a ) A_ : Optional[Any] = """A robot, 4k photo""" A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ , A_ : List[str] = pipe_prior( _a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ : List[Any] = pipeline( image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,) A_ : Dict = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_a ,_a )
27
1
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if density <= 0:
        raise ValueError("""Impossible fluid density""")
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""")
    return (bulk_modulus / density) ** 0.5

if __name__ == "__main__":
    import doctest

    doctest.testmod()
27
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    a_ = """deberta-v2"""

    def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
        '''simple docstring'''
        super().__init__(**_a )
        A_ : Union[str, Any] = hidden_size
        A_ : Dict = num_hidden_layers
        A_ : Union[str, Any] = num_attention_heads
        A_ : List[Any] = intermediate_size
        A_ : List[Any] = hidden_act
        A_ : Optional[int] = hidden_dropout_prob
        A_ : Dict = attention_probs_dropout_prob
        A_ : int = max_position_embeddings
        A_ : Any = type_vocab_size
        A_ : List[Any] = initializer_range
        A_ : int = relative_attention
        A_ : Tuple = max_relative_positions
        A_ : int = pad_token_id
        A_ : Tuple = position_biased_input

        # Backwards compatibility
        if type(_a ) == str:
            A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]

        A_ : Any = pos_att_type
        A_ : Optional[int] = vocab_size
        A_ : Tuple = layer_norm_eps

        A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
        A_ : Union[str, Any] = pooler_dropout
        A_ : List[Any] = pooler_hidden_act

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    @property
    def _a ( self : Any ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            A_ : Any = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    @property
    def _a ( self : Optional[int] ):
        '''simple docstring'''
        return 12

    def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
        '''simple docstring'''
        A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
27
1
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Dict):
    A_ : Tuple = 0
    A_ : Union[str, Any] = len(lowerCamelCase)
    for i in range(n - 1):
        for j in range(i + 1 , lowerCamelCase):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions

def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
    if len(lowerCamelCase) <= 1:
        return arr, 0
    A_ : Optional[Any] = len(lowerCamelCase) // 2
    A_ : List[Any] = arr[0:mid]
    A_ : Tuple = arr[mid:]
    A_ , A_ : str = count_inversions_recursive(lowerCamelCase)
    A_ , A_ : str = count_inversions_recursive(lowerCamelCase)
    A_ , A_ : Any = _count_cross_inversions(lowerCamelCase , lowerCamelCase)
    A_ : List[str] = inversion_p + inversions_q + cross_inversions
    return c, num_inversions

def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : str):
    A_ : Optional[int] = []
    A_ : Union[str, Any] = 0
    while i < len(lowerCamelCase) and j < len(lowerCamelCase):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(lowerCamelCase) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(lowerCamelCase):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion

def lowerCamelCase ( ):
    A_ : str = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    A_ : Tuple = count_inversions_bf(lowerCamelCase)
    A_ , A_ : Union[str, Any] = count_inversions_recursive(lowerCamelCase)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("""number of inversions = """ , lowerCamelCase)
    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    A_ : str = count_inversions_bf(lowerCamelCase)
    A_ , A_ : str = count_inversions_recursive(lowerCamelCase)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("""number of inversions = """ , lowerCamelCase)
    # an empty list should also have zero inversions
    A_ : Any = []
    A_ : List[str] = count_inversions_bf(lowerCamelCase)
    A_ , A_ : Tuple = count_inversions_recursive(lowerCamelCase)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("""number of inversions = """ , lowerCamelCase)

if __name__ == "__main__":
    main()
27
'''simple docstring'''
import sys
import webbrowser

import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    __magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    __magic_name__ = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    __magic_name__ = BeautifulSoup(res.text, 'html.parser')
    __magic_name__ = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"""https://google.com{link.get('href')}""")
27
1
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

if is_torch_available():
    from transformers import AutoModelForSeqaSeqLM, AutoTokenizer

@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def _a ( self : Union[str, Any] ):
        '''simple docstring'''
        A_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ,return_dict=_a ).to(_a )
        A_ : int = AutoTokenizer.from_pretrained("""google/mt5-small""" )

        A_ : int = tokenizer("""Hello there""" ,return_tensors="""pt""" ).input_ids
        A_ : Union[str, Any] = tokenizer("""Hi I am""" ,return_tensors="""pt""" ).input_ids

        A_ : Tuple = model(input_ids.to(_a ) ,labels=labels.to(_a ) ).loss
        A_ : Optional[int] = -(labels.shape[-1] * loss.item())

        A_ : Optional[int] = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
27
'''simple docstring'''
from ... import PretrainedConfig

__magic_name__ = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    a_ = """nezha"""

    def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
        '''simple docstring'''
        super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
        A_ : Tuple = vocab_size
        A_ : int = hidden_size
        A_ : Any = num_hidden_layers
        A_ : List[Any] = num_attention_heads
        A_ : Tuple = hidden_act
        A_ : List[Any] = intermediate_size
        A_ : List[str] = hidden_dropout_prob
        A_ : Tuple = attention_probs_dropout_prob
        A_ : Dict = max_position_embeddings
        A_ : Optional[Any] = max_relative_position
        A_ : List[Any] = type_vocab_size
        A_ : int = initializer_range
        A_ : Tuple = layer_norm_eps
        A_ : Dict = classifier_dropout
        A_ : int = use_cache
27
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) def lowerCamelCase ( lowerCamelCase : Dict): A_ : List[str] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: A_ : Union[str, Any] = [144, 192, 240] A_ : int = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: A_ : List[str] = [96, 120, 144] A_ : Any = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: A_ : Any = [64, 80, 96] A_ : List[str] = [16, 16, 24, 48, 64, 80, 320] A_ : Any = 0.05 A_ : List[Any] = 2.0 if mobilevit_name.startswith("""deeplabv3_"""): A_ : int = 512 A_ : Optional[int] = 16 A_ : List[Any] = 21 A_ : List[str] = """pascal-voc-id2label.json""" else: A_ : str = 1000 A_ : Any = """imagenet-1k-id2label.json""" A_ : Any = """huggingface/label-files""" A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r""")) A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()} A_ : Any = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False): for i in range(1 , 6): if F'layer_{i}.' in name: A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.') if "conv_1." in name: A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""") if ".block." in name: A_ : Optional[Any] = name.replace(""".block.""" , """.""") if "exp_1x1" in name: A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""") if "red_1x1" in name: A_ : int = name.replace("""red_1x1""" , """reduce_1x1""") if ".local_rep.conv_3x3." in name: A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""") if ".local_rep.conv_1x1." in name: A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""") if ".norm." in name: A_ : Tuple = name.replace(""".norm.""" , """.normalization.""") if ".conv." in name: A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""") if ".conv_proj." in name: A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""") for i in range(0 , 2): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.') for i in range(2 , 6): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.') if "expand_1x1" in name: A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""") if "conv_3x3" in name: A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""") if "reduce_1x1" in name: A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""") for i in range(2 , 5): if F'.global_rep.{i}.weight' in name: A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""") if F'.global_rep.{i}.bias' in name: A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""") if ".global_rep." in name: A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""") if ".pre_norm_mha.0." 
in name: A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""") if ".pre_norm_mha.1.out_proj." in name: A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""") if ".pre_norm_ffn.0." in name: A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""") if ".pre_norm_ffn.1." in name: A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""") if ".pre_norm_ffn.4." in name: A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""") if ".transformer." in name: A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""") if ".aspp_layer." in name: A_ : int = name.replace(""".aspp_layer.""" , """.""") if ".aspp_pool." in name: A_ : Tuple = name.replace(""".aspp_pool.""" , """.""") if "seg_head." in name: A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""") if "segmentation_head.classifier.classifier." in name: A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""") if "classifier.fc." in name: A_ : str = name.replace("""classifier.fc.""" , """classifier.""") elif (not base_model) and ("segmentation_head." not in name): A_ : str = """mobilevit.""" + name return name def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False): if base_model: A_ : Dict = """""" else: A_ : Any = """mobilevit.""" for key in orig_state_dict.copy().keys(): A_ : List[Any] = orig_state_dict.pop(lowerCamelCase) if key[:8] == "encoder.": A_ : int = key[8:] if "qkv" in key: A_ : Any = key.split(""".""") A_ : str = int(key_split[0][6:]) - 1 A_ : int = int(key_split[3]) A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}') A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size A_ : Optional[Any] = ( F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' 
) if "weight" in key: A_ : Dict = val[:dim, :] A_ : Optional[int] = val[dim : dim * 2, :] A_ : List[Any] = val[-dim:, :] else: A_ : Optional[Any] = val[:dim] A_ : List[Any] = val[dim : dim * 2] A_ : Any = val[-dim:] else: A_ : List[str] = val return orig_state_dict def lowerCamelCase ( ): A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw) return im @torch.no_grad() def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False): A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase) # load original state_dict A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""") # load 🤗 model if mobilevit_name.startswith("""deeplabv3_"""): A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval() else: A_ : str = MobileViTForImageClassification(lowerCamelCase).eval() A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase) model.load_state_dict(lowerCamelCase) # Check outputs on an image, prepared by MobileViTImageProcessor A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32) A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""") A_ : List[Any] = model(**lowerCamelCase) A_ : Dict = outputs.logits if mobilevit_name.startswith("""deeplabv3_"""): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": A_ : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xs": A_ : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xxs": A_ : Tuple = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241]) elif mobilevit_name == "mobilevit_xs": A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587]) elif mobilevit_name == "mobilevit_xxs": A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4) Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase) print(F'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase) if push_to_hub: A_ : str = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", 
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""") A_ : Union[str, Any] = model_mapping[mobilevit_name] image_processor.push_to_hub(lowerCamelCase , organization="""apple""") model.push_to_hub(lowerCamelCase , organization="""apple""") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __magic_name__ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
27
'''simple docstring'''
from __future__ import annotations

def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
    A_ , A_ : List[Any] = set(lowerCamelCase), [start]
    while stack:
        A_ : Optional[Any] = stack.pop()
        explored.add(lowerCamelCase)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(lowerCamelCase)
    return explored

__magic_name__ = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
27
1
'''simple docstring'''
from collections import defaultdict

class __lowerCAmelCase :
    '''simple docstring'''

    def __init__( self : Optional[Any] ,_a : List[str] ,_a : Optional[int] ):
        '''simple docstring'''
        A_ : Optional[int] = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        A_ : Union[str, Any] = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(_a ) )
        ]
        A_ : Any = defaultdict(_a )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        A_ : str = (1 << len(_a )) - 1

    def _a ( self : Union[str, Any] ,_a : Union[str, Any] ,_a : List[str] ):
        '''simple docstring'''
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
        A_ : Optional[int] = self.count_ways_until(_a ,task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) ,task_no + 1 )
        # save the value.
        A_ : int = total_ways_util
        return self.dp[mask][task_no]

    def _a ( self : List[Any] ,_a : Optional[Any] ):
        '''simple docstring'''
        for i in range(len(_a ) ):
            for j in task_performed[i]:
                self.task[j].append(_a )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 ,1 )

if __name__ == "__main__":
    __magic_name__ = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    __magic_name__ = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
27
27
1
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] ,_a : int ,_a : int=7 ,_a : str=3 ,_a : Dict=18 ,_a : Optional[int]=30 ,_a : int=400 ,_a : List[Any]=True ,_a : List[str]=None ,_a : Any=True ,_a : Any=[0.5, 0.5, 0.5] ,_a : Tuple=[0.5, 0.5, 0.5] ,): '''simple docstring''' A_ : Union[str, Any] = size if size is not None else {"""height""": 18, """width""": 18} A_ : Tuple = parent A_ : Any = batch_size A_ : Tuple = num_channels A_ : int = image_size A_ : Union[str, Any] = min_resolution A_ : List[str] = max_resolution A_ : str = do_resize A_ : Dict = size A_ : str = do_normalize A_ : Union[str, Any] = image_mean A_ : str = image_std def _a ( self : Union[str, Any] ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = DPTImageProcessor if is_vision_available() else None def _a ( self : List[Any] ): '''simple docstring''' A_ : Tuple = DPTImageProcessingTester(self ) @property def _a ( self : Dict ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : int ): '''simple docstring''' A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_a ,"""image_mean""" ) ) self.assertTrue(hasattr(_a ,"""image_std""" ) ) self.assertTrue(hasattr(_a ,"""do_normalize""" ) ) self.assertTrue(hasattr(_a ,"""do_resize""" ) ) self.assertTrue(hasattr(_a ,"""size""" ) ) def _a ( self : Any ): '''simple docstring''' A_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} ) A_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ) for image in image_inputs: self.assertIsInstance(_a ,Image.Image ) # Test not batched input A_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched A_ : Union[str, Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def _a ( self : Tuple ): '''simple docstring''' A_ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy 
tensors A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,numpify=_a ) for image in image_inputs: self.assertIsInstance(_a ,np.ndarray ) # Test not batched input A_ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched A_ : Optional[Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,torchify=_a ) for image in image_inputs: self.assertIsInstance(_a ,torch.Tensor ) # Test not batched input A_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched A_ : Union[str, Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,)
27
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __magic_name__ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""pixel_values"""] def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,): '''simple docstring''' super().__init__(**_a ) A_ : Tuple = size if size is not None else {"""shortest_edge""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" ) A_ : Any = do_resize A_ : List[str] = size A_ : Union[str, Any] = resample A_ : Dict = do_center_crop A_ : List[str] = crop_size A_ : Any = do_rescale A_ : Union[str, Any] = rescale_factor A_ : Any = do_normalize A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Tuple = do_convert_rgb def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,): '''simple docstring''' A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a ) return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a ) def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,): '''simple docstring''' A_ : Optional[int] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,): '''simple docstring''' return rescale(_a ,scale=_a ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,): '''simple docstring''' return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a ) def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,): '''simple docstring''' A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A_ : Tuple = size if size is not None else self.size A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a ) A_ : List[str] = resample if resample is not None else self.resample A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a ) A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Any = do_normalize if do_normalize is not None else self.do_normalize A_ : int = image_mean if image_mean is not None else self.image_mean A_ : int = image_std if image_std is not None else self.image_std A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : int = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : Optional[int] = [convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. A_ : Dict = [to_numpy_array(_a ) for image in images] if do_resize: A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images] if do_center_crop: A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images] if do_normalize: A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images] A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images] A_ : List[str] = {"""pixel_values""": images} return BatchFeature(data=_a ,tensor_type=_a )
27
1
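The CLIP-style image processor in the row above applies its transforms in a fixed order: resize to the shortest edge, center-crop, rescale by 1/255, then normalize per channel. A minimal NumPy sketch of the rescale-and-normalize tail of that pipeline; the mean/std values are the standard OpenAI CLIP constants, stated here as an assumption since the code only references them as OPENAI_CLIP_MEAN / OPENAI_CLIP_STD, and the helper name is illustrative:

import numpy as np

# Standard CLIP normalization constants (assumed values of OPENAI_CLIP_MEAN / OPENAI_CLIP_STD).
CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

def rescale_and_normalize(image: np.ndarray) -> np.ndarray:
    """Rescale a uint8 HWC image to [0, 1], then normalize per channel."""
    pixels = image.astype(np.float32) * (1 / 255)   # the do_rescale step, rescale_factor=1/255
    return (pixels - CLIP_MEAN) / CLIP_STD          # the do_normalize step

# A 224x224 RGB dummy image, as produced by the resize + center-crop steps.
dummy = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
print(rescale_and_normalize(dummy).shape)  # (224, 224, 3)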
'''simple docstring'''
from itertools import permutations


def lowerCamelCase ( lowerCamelCase : tuple):
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    A_ : Union[str, Any] = [7, 11, 13, 17]
    for i, test in enumerate(lowerCamelCase):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def lowerCamelCase ( lowerCamelCase : int = 10):
    return sum(
        int("""""".join(map(lowerCamelCase , lowerCamelCase)))
        for num in permutations(range(lowerCamelCase))
        if is_substring_divisible(lowerCamelCase))


if __name__ == "__main__":
    print(f"""{solution() = }""")
27
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


__magic_name__ = logging.get_logger(__name__)


class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''

    def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
        '''simple docstring'''
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" ,_a ,)
        super().__init__(*_a ,**_a )
27
1
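The first snippet in the row above is Project Euler 43: a 0-9 pandigital number is kept when each 3-digit substring d2d3d4 through d8d9d10 is divisible by 2, 3, 5, 7, 11, 13, 17 in turn. A readable sketch of the same check, with illustrative names:

from itertools import permutations

PRIMES = (2, 3, 5, 7, 11, 13, 17)

def is_substring_divisible(digits: tuple) -> bool:
    # The 3-digit window starting at digit d(i+2) must be divisible by PRIMES[i].
    return all(
        (digits[i + 1] * 100 + digits[i + 2] * 10 + digits[i + 3]) % p == 0
        for i, p in enumerate(PRIMES)
    )

# Known example from the problem statement: 1406357289 passes every test.
print(is_substring_divisible(tuple(int(c) for c in "1406357289")))  # True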
'''simple docstring'''
from __future__ import annotations

from math import ceil, floor, sqrt


def lowerCamelCase ( lowerCamelCase : int = 200_0000):
    A_ : list[int] = [0]
    A_ : int

    for idx in range(1 , ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    A_ : int = 0
    # the area corresponding to the grid that gives the product closest to target
    A_ : int = 0
    # an estimate of b, using the quadratic formula
    A_ : float
    # the largest integer less than b_estimate
    A_ : int
    # the smallest integer greater than b_estimate
    A_ : int
    # the triangle number corresponding to b_floor
    A_ : int
    # the triangle number corresponding to b_ceil
    A_ : int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1):
        A_ : Tuple = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        A_ : Tuple = floor(lowerCamelCase)
        A_ : Optional[int] = ceil(lowerCamelCase)
        A_ : Tuple = triangle_numbers[b_floor]
        A_ : Optional[Any] = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
                target - best_product):
            A_ : str = triangle_b_first_guess * triangle_a
            A_ : List[Any] = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
                target - best_product):
            A_ : Optional[int] = triangle_b_second_guess * triangle_a
            A_ : int = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"""{solution() = }""")
27
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def lowerCamelCase (
    lowerCamelCase : str ,
    lowerCamelCase : complex ,
    lowerCamelCase : str = "x" ,
    lowerCamelCase : float = 10**-10 ,
    lowerCamelCase : int = 1 ,
):
    A_ : int = symbols(lowerCamelCase)
    A_ : List[Any] = lambdify(lowerCamelCase , lowerCamelCase)
    A_ : List[str] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase))
    A_ : str = starting_point

    while True:
        if diff_function(lowerCamelCase) != 0:
            A_ : int = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
                lowerCamelCase)
        else:
            raise ZeroDivisionError("""Could not find root""") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        A_ : Union[str, Any] = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
27
1
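The second snippet in the row above is a sympy-based Newton-Raphson root finder. The core update x <- x - m*f(x)/f'(x) needs nothing beyond plain callables; a dependency-free sketch with illustrative names:

import math

def newton_raphson(f, df, x0, precision=1e-10, multiplicity=1, max_iter=100):
    """Iterate x <- x - m*f(x)/df(x) until successive guesses agree."""
    x = x0
    for _ in range(max_iter):
        slope = df(x)
        if slope == 0:
            raise ZeroDivisionError("Could not find root")
        next_x = x - multiplicity * f(x) / slope
        if abs(next_x - x) < precision:
            return next_x
        x = next_x
    raise RuntimeError("Did not converge")

# Root of sin(x) near 2 is pi.
print(newton_raphson(math.sin, math.cos, 2.0))  # ~3.141592653589793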
'''simple docstring''' import argparse import os import re import packaging.version __magic_name__ = 'examples/' __magic_name__ = { 'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), 'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } __magic_name__ = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } __magic_name__ = 'README.md' def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]): with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""") as f: A_ : int = f.read() A_ , A_ : Union[str, Any] = REPLACE_PATTERNS[pattern] A_ : Union[str, Any] = replace.replace("""VERSION""" , lowerCamelCase) A_ : List[Any] = re_pattern.sub(lowerCamelCase , lowerCamelCase) with open(lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""") as f: f.write(lowerCamelCase) def lowerCamelCase ( lowerCamelCase : str): for folder, directories, fnames in os.walk(lowerCamelCase): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""") if "legacy" in directories: directories.remove("""legacy""") for fname in fnames: if fname.endswith(""".py"""): update_version_in_file(os.path.join(lowerCamelCase , lowerCamelCase) , lowerCamelCase , pattern="""examples""") def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Tuple=False): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowerCamelCase , lowerCamelCase , lowerCamelCase) if not patch: update_version_in_examples(lowerCamelCase) def lowerCamelCase ( ): A_ : Optional[Any] = """🤗 Transformers currently provides the following architectures""" A_ : Optional[int] = """1. Want to contribute a new model?""" with open(lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""") as f: A_ : List[Any] = f.readlines() # Find the start of the list. A_ : Optional[int] = 0 while not lines[start_index].startswith(_start_prompt): start_index += 1 start_index += 1 A_ : List[str] = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt): if lines[index].startswith("""1."""): A_ : Optional[int] = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""") as f: f.writelines(lowerCamelCase) def lowerCamelCase ( ): with open(REPLACE_FILES["""init"""] , """r""") as f: A_ : Optional[Any] = f.read() A_ : int = REPLACE_PATTERNS["""init"""][0].search(lowerCamelCase).groups()[0] return packaging.version.parse(lowerCamelCase) def lowerCamelCase ( lowerCamelCase : Dict=False): A_ : Dict = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""") if default_version.is_devrelease: A_ : Optional[int] = default_version.base_version elif patch: A_ : int = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: A_ : Union[str, Any] = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. A_ : Tuple = input(F'Which version are you releasing? [{default_version}]') if len(lowerCamelCase) == 0: A_ : List[str] = default_version print(F'Updating version to {version}.') global_version_update(lowerCamelCase , patch=lowerCamelCase) def lowerCamelCase ( ): A_ : Optional[Any] = get_version() A_ : Tuple = F'{current_version.major}.{current_version.minor + 1}.0.dev0' A_ : List[str] = current_version.base_version # Check with the user we got that right. A_ : List[Any] = input(F'Which version are we developing now? [{dev_version}]') if len(lowerCamelCase) == 0: A_ : Tuple = dev_version print(F'Updating version to {version}.') global_version_update(lowerCamelCase) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') __magic_name__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
27
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset __magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Dict ,_a : Dict ): '''simple docstring''' super().__init__() A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a ) A_ : int = list(model.children() )[:-2] A_ : int = nn.Sequential(*_a ) A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _a ( self : str ,_a : Optional[int] ): '''simple docstring''' A_ : Tuple = self.pool(self.model(_a ) ) A_ : Any = torch.flatten(_a ,start_dim=2 ) A_ : str = out.transpose(1 ,2 ).contiguous() return out # BxNx2048 class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ): '''simple docstring''' A_ : Dict = [json.loads(_a ) for l in open(_a )] A_ : Optional[int] = os.path.dirname(_a ) A_ : Optional[Any] = tokenizer A_ : Optional[Any] = labels A_ : List[Any] = len(_a ) A_ : str = max_seq_length A_ : str = transforms def __len__( self : str ): '''simple docstring''' return len(self.data ) def __getitem__( self : Tuple ,_a : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) ) A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1] A_ : Optional[int] = sentence[: self.max_seq_length] A_ : Any = torch.zeros(self.n_classes ) A_ : Tuple = 1 A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" ) A_ : Union[str, Any] = self.transforms(_a ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _a ( self : List[Any] ): '''simple docstring''' A_ : str = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( lowerCamelCase : str): A_ : List[Any] = [len(row["""sentence"""]) for row in batch] A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase) A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long) A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long) for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)): A_ : str = input_row["""sentence"""] A_ : Tuple = 1 A_ : int = torch.stack([row["""image"""] for row in batch]) A_ : str = torch.stack([row["""label"""] for row in batch]) A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch]) A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch]) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ): return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ): return transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 
0.1214_5835, 0.1438_0469] , ), ])
27
1
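The release script in the row above drives everything off (pattern, replacement) pairs like its REPLACE_PATTERNS dict, substituting a concrete version into a "VERSION" template. A minimal sketch of that mechanism on an in-memory string, without the file I/O:

import re

# (regex, template) in the same shape as the script's REPLACE_PATTERNS entries.
pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"'

source = 'import os\n__version__ = "0.18.0.dev0"\n'
replacement = template.replace("VERSION", "0.18.0")
print(pattern.sub(replacement, source))
# import os
# __version__ = "0.18.0"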
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 400_0000):
    A_ : Dict = [0, 1]
    A_ : str = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    A_ : Optional[int] = 0
    for j in range(len(lowerCamelCase) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"""{solution() = }""")
27
'''simple docstring'''
from __future__ import annotations

import math


def lowerCamelCase ( lowerCamelCase : int):
    if num <= 0:
        A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(lowerCamelCase)
    A_ : str = [True] * (num + 1)
    A_ : Tuple = []
    A_ : str = 2
    A_ : Any = int(math.sqrt(lowerCamelCase))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(lowerCamelCase)
            # Set multiples of start be False
            for i in range(start * start , num + 1 , lowerCamelCase):
                if sieve[i] is True:
                    A_ : Union[str, Any] = False
        start += 1
    for j in range(end + 1 , num + 1):
        if sieve[j] is True:
            prime.append(lowerCamelCase)
    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input('Enter a positive integer: ').strip())))
27
1
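The row above pairs two classics: the even-Fibonacci sum (Project Euler 2) and a sieve of Eratosthenes. The Fibonacci sum needs no stored list at all; a minimal sketch (the known answer for the 4,000,000 limit is 4613732):

def even_fib_sum(limit: int = 4_000_000) -> int:
    """Sum the even Fibonacci numbers not exceeding limit."""
    a, b = 1, 2
    total = 0
    while a <= limit:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total

print(even_fib_sum())  # 4613732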
'''simple docstring'''
import unittest

import numpy as np
import torch

from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class __lowerCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    @property
    def _a ( self : Optional[Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        A_ : str = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
        return model

    def _a ( self : Tuple ):
        '''simple docstring'''
        A_ : Any = self.dummy_uncond_unet
        A_ : List[str] = KarrasVeScheduler()

        A_ : Union[str, Any] = KarrasVePipeline(unet=_a ,scheduler=_a )
        pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )

        A_ : Tuple = torch.manual_seed(0 )
        A_ : Union[str, Any] = pipe(num_inference_steps=2 ,generator=_a ,output_type="""numpy""" ).images

        A_ : Union[str, Any] = torch.manual_seed(0 )
        A_ : Optional[int] = pipe(num_inference_steps=2 ,generator=_a ,output_type="""numpy""" ,return_dict=_a )[0]

        A_ : Dict = image[0, -3:, -3:, -1]
        A_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        A_ : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    '''simple docstring'''

    def _a ( self : Optional[int] ):
        '''simple docstring'''
        A_ : Tuple = """google/ncsnpp-celebahq-256"""
        A_ : Dict = UNetaDModel.from_pretrained(_a )
        A_ : List[str] = KarrasVeScheduler()

        A_ : Any = KarrasVePipeline(unet=_a ,scheduler=_a )
        pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )

        A_ : str = torch.manual_seed(0 )
        A_ : Optional[int] = pipe(num_inference_steps=20 ,generator=_a ,output_type="""numpy""" ).images

        A_ : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        A_ : Dict = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
27
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __magic_name__ = trt.Logger(trt.Logger.WARNING) __magic_name__ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __magic_name__ = logging.getLogger(__name__) __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __magic_name__ = parser.parse_args() if args.tokenizer_name: __magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __magic_name__ = args.per_device_eval_batch_size __magic_name__ = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __magic_name__ = True __magic_name__ = 'temp_engine/bert-fp32.engine' if args.fpaa: __magic_name__ = 'temp_engine/bert-fp16.engine' if args.inta: __magic_name__ = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __magic_name__ = [network.get_input(i) for i in range(network.num_inputs)] __magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __magic_name__ = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __magic_name__ = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __magic_name__ = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]): A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa) A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa) A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase) # start time A_ : List[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Synchronize the stream and take time stream.synchronize() # end time A_ : str = time.time() A_ : 
Tuple = end_time - start_time A_ : Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __magic_name__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __magic_name__ = raw_datasets['validation'].column_names __magic_name__ = 'question' if 'question' in column_names else column_names[0] __magic_name__ = 'context' if 'context' in column_names else column_names[1] __magic_name__ = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __magic_name__ = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __magic_name__ = min(args.max_seq_length, tokenizer.model_max_length) def lowerCamelCase ( lowerCamelCase : Dict): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. 
A_ : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A_ : Union[str, Any] = [] for i in range(len(tokenized_examples["""input_ids"""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase) A_ : Tuple = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A_ : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. A_ : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i]) ] return tokenized_examples __magic_name__ = raw_datasets['validation'] # Validation Feature Creation __magic_name__ = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __magic_name__ = default_data_collator __magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __magic_name__ = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. A_ : Tuple = postprocess_qa_predictions( examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: A_ : Dict = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase) __magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! 
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize # Allocate device memory for inputs and outputs. __magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __magic_name__ = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f""" Num examples = {len(eval_dataset)}""") logger.info(f""" Batch size = {args.per_device_eval_batch_size}""") __magic_name__ = 0.0 __magic_name__ = 0 __magic_name__ = timeit.default_timer() __magic_name__ = None for step, batch in enumerate(eval_dataloader): __magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __magic_name__ , __magic_name__ = outputs __magic_name__ = torch.tensor(start_logits) __magic_name__ = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __magic_name__ = nested_truncate(all_preds, len(eval_dataset)) __magic_name__ = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds) __magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"""Evaluation metrics: {eval_metric}""")
27
1
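The KarrasVe pipeline test in the row above seeds the generator and then compares a small corner slice of the generated image against hard-coded values within 1e-2, which is how such regression tests stay cheap. A sketch of that assertion pattern on plain arrays; the reference values here are computed from the run itself, purely for illustration:

import numpy as np

# Pretend this is the (1, H, W, 3) image returned by a seeded pipeline run.
rng = np.random.default_rng(0)
image = rng.random((1, 32, 32, 3))

# Take the same 3x3 corner of the last channel the test uses...
image_slice = image[0, -3:, -3:, -1]
# ...and freeze it as the regression reference (here: copied from the run itself).
expected_slice = image_slice.copy()

assert image.shape == (1, 32, 32, 3)
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-2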
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = ["""onnx"""]

    def __init__( self : Any ,*_a : int ,**_a : Optional[Any] ):
        '''simple docstring'''
        requires_backends(self ,["""onnx"""] )

    @classmethod
    def _a ( cls : List[Any] ,*_a : Tuple ,**_a : str ):
        '''simple docstring'''
        requires_backends(cls ,["""onnx"""] )

    @classmethod
    def _a ( cls : str ,*_a : List[Any] ,**_a : List[str] ):
        '''simple docstring'''
        requires_backends(cls ,["""onnx"""] )
27
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


__magic_name__ = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = ['ConvNextFeatureExtractor']
    __magic_name__ = ['ConvNextImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
27
1
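The two files in the row above show how the library fails lazily when an optional backend (here onnx) is missing: imports always succeed via a lazy module, and a placeholder class raises only when actually used. A self-contained sketch of that dummy-object pattern; the importlib check stands in for the library's own availability helpers, and all names are illustrative:

import importlib.util

def requires_backends(obj, backends):
    """Raise if any named backend is not importable."""
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the {missing} backend(s).")

class OnnxPlaceholder:
    _backends = ["onnx"]

    def __init__(self):
        # Importing this class is always safe; instantiating it is not.
        requires_backends(self, self._backends)

# OnnxPlaceholder() raises ImportError unless the `onnx` package is installed.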
'''simple docstring''' import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __magic_name__ = logging.getLogger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """token-classification""" def __init__( self : Tuple ,_a : int ): '''simple docstring''' if type(_a ) == dict: A_ : str = Namespace(**_a ) A_ : Dict = import_module("""tasks""" ) try: A_ : Optional[Any] = getattr(_a ,hparams.task_type ) A_ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ' f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' ) A_ : Tuple = self.token_classification_task.get_labels(hparams.labels ) A_ : str = CrossEntropyLoss().ignore_index super().__init__(_a ,len(self.labels ) ,self.mode ) def _a ( self : int ,**_a : Any ): '''simple docstring''' return self.model(**_a ) def _a ( self : Optional[Any] ,_a : Union[str, Any] ,_a : int ): '''simple docstring''' A_ : str = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": A_ : str = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids A_ : str = self(**_a ) A_ : Dict = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def _a ( self : Dict ): '''simple docstring''' A_ : Union[str, Any] = self.hparams for mode in ["train", "dev", "test"]: A_ : Union[str, Any] = self._feature_file(_a ) if os.path.exists(_a ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" ,_a ) A_ : Any = torch.load(_a ) else: logger.info("""Creating features from dataset file at %s""" ,args.data_dir ) A_ : List[str] = self.token_classification_task.read_examples_from_file(args.data_dir ,_a ) A_ : Optional[Any] = self.token_classification_task.convert_examples_to_features( _a ,self.labels ,args.max_seq_length ,self.tokenizer ,cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) ,cls_token=self.tokenizer.cls_token ,cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 ,sep_token=self.tokenizer.sep_token ,sep_token_extra=_a ,pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) ,pad_token=self.tokenizer.pad_token_id ,pad_token_segment_id=self.tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) logger.info("""Saving features into cached file %s""" ,_a ) torch.save(_a ,_a ) def _a ( self : Dict ,_a : int ,_a : int ,_a : bool = False ): '''simple docstring''' A_ : int = self._feature_file(_a ) logger.info("""Loading features from cached file %s""" ,_a ) A_ : str = torch.load(_a ) A_ : Union[str, Any] = torch.tensor([f.input_ids for f in features] ,dtype=torch.long ) A_ : List[Any] = torch.tensor([f.attention_mask for f in features] ,dtype=torch.long ) if features[0].token_type_ids is not None: A_ : int = torch.tensor([f.token_type_ids for f in features] ,dtype=torch.long ) else: A_ : Any = torch.tensor([0 for f 
in features] ,dtype=torch.long ) # HACK(we will not use this anymore soon) A_ : List[Any] = torch.tensor([f.label_ids for f in features] ,dtype=torch.long ) return DataLoader( TensorDataset(_a ,_a ,_a ,_a ) ,batch_size=_a ) def _a ( self : Optional[int] ,_a : List[str] ,_a : Tuple ): '''simple docstring''' """Compute validation""" "" A_ : List[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type != "distilbert": A_ : List[str] = ( batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None ) # XLM and RoBERTa don"t use token_type_ids A_ : Union[str, Any] = self(**_a ) A_ , A_ : int = outputs[:2] A_ : List[Any] = logits.detach().cpu().numpy() A_ : int = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _a ( self : Optional[Any] ,_a : List[str] ): '''simple docstring''' A_ : Tuple = torch.stack([x["""val_loss"""] for x in outputs] ).mean() A_ : Optional[Any] = np.concatenate([x["""pred"""] for x in outputs] ,axis=0 ) A_ : List[str] = np.argmax(_a ,axis=2 ) A_ : Dict = np.concatenate([x["""target"""] for x in outputs] ,axis=0 ) A_ : Tuple = dict(enumerate(self.labels ) ) A_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )] A_ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) A_ : Union[str, Any] = { """val_loss""": val_loss_mean, """accuracy_score""": accuracy_score(_a ,_a ), """precision""": precision_score(_a ,_a ), """recall""": recall_score(_a ,_a ), """f1""": fa_score(_a ,_a ), } A_ : Any = dict(results.items() ) A_ : Any = results return ret, preds_list, out_label_list def _a ( self : Optional[Any] ,_a : Any ): '''simple docstring''' A_ , A_ , A_ : Tuple = self._eval_end(_a ) A_ : int = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _a ( self : Tuple ,_a : List[Any] ): '''simple docstring''' A_ , A_ , A_ : Tuple = self._eval_end(_a ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 A_ : str = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _a ( _a : Tuple ,_a : str ): '''simple docstring''' BaseTransformer.add_model_specific_args(_a ,_a ) parser.add_argument( """--task_type""" ,default="""NER""" ,type=_a ,help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" ,default=128 ,type=_a ,help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) ,) parser.add_argument( """--labels""" ,default="""""" ,type=_a ,help="""Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used.""" ,) parser.add_argument( """--gpus""" ,default=0 ,type=_a ,help="""The number of GPUs allocated for this, it is by default 0 meaning none""" ,) parser.add_argument( """--overwrite_cache""" ,action="""store_true""" ,help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __magic_name__ = NERTransformer.add_model_specific_args(parser, os.getcwd()) __magic_name__ = parser.parse_args() __magic_name__ = NERTransformer(args) __magic_name__ = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __magic_name__ = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True)) __magic_name__ = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
27
'''simple docstring''' import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_text_model""" def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,): '''simple docstring''' super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ) A_ : Tuple = vocab_size A_ : int = hidden_size A_ : Optional[int] = intermediate_size A_ : Optional[int] = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : int = max_position_embeddings A_ : str = hidden_act A_ : Union[str, Any] = layer_norm_eps A_ : Tuple = attention_dropout A_ : Union[str, Any] = initializer_range A_ : List[Any] = initializer_factor @classmethod def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : int = cls.get_config_dict(_a ,**_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_vision_model""" def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,): '''simple docstring''' super().__init__(**_a ) A_ : List[str] = hidden_size A_ : Union[str, Any] = intermediate_size A_ : Union[str, Any] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : int = num_channels A_ : str = image_size A_ : List[Any] = patch_size A_ : int = hidden_act A_ : List[Any] = layer_norm_eps A_ : List[str] = attention_dropout A_ : str = initializer_range A_ : str = initializer_factor @classmethod def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit""" a_ = True def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,): '''simple docstring''' super().__init__(**_a ) if text_config is None: A_ : List[Any] = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) A_ : Dict = OwlViTTextConfig(**_a ) A_ : Dict = OwlViTVisionConfig(**_a ) A_ : Any = projection_dim A_ : Optional[int] = logit_scale_init_value A_ : Optional[int] = return_dict A_ : Dict = 1.0 @classmethod def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a ) if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) @classmethod def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ): '''simple docstring''' A_ : str = {} A_ : int = text_config A_ : Union[str, Any] = vision_config return cls.from_dict(_a ,**_a ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : Optional[int] = self.vision_config.to_dict() A_ : List[Any] = self.__class__.model_type return output class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : int ): '''simple docstring''' return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _a ( self : str ): '''simple docstring''' return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _a ( self : Optional[Any] ): '''simple docstring''' return 1e-4 def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,): '''simple docstring''' A_ : Any = super().generate_dummy_inputs( processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a ) A_ : Any = super().generate_dummy_inputs( processor.image_processor ,batch_size=_a ,framework=_a ) return {**text_input_dict, **image_input_dict} @property def _a ( self : Optional[Any] ): '''simple docstring''' return 14
27
1
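The OwlViT config in the row above is a composite: the top-level config owns a text config and a vision config and rebuilds them from nested dicts in its from_dict-style classmethods. A dataclass sketch of that round-trip; the class names and default values are illustrative, not the model's real ones:

from dataclasses import asdict, dataclass, field

@dataclass
class TextConfig:
    hidden_size: int = 512
    num_hidden_layers: int = 12

@dataclass
class VisionConfig:
    hidden_size: int = 768
    patch_size: int = 32

@dataclass
class CompositeConfig:
    text_config: TextConfig = field(default_factory=TextConfig)
    vision_config: VisionConfig = field(default_factory=VisionConfig)
    projection_dim: int = 512

    @classmethod
    def from_dict(cls, d: dict) -> "CompositeConfig":
        return cls(
            text_config=TextConfig(**d.get("text_config", {})),
            vision_config=VisionConfig(**d.get("vision_config", {})),
            projection_dim=d.get("projection_dim", 512),
        )

cfg = CompositeConfig.from_dict({"text_config": {"hidden_size": 256}})
assert asdict(cfg)["text_config"]["hidden_size"] == 256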
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = """M-CLIP"""

    def __init__( self : int ,_a : str=1024 ,_a : str=768 ,**_a : Optional[Any] ):
        '''simple docstring'''
        A_ : Any = transformerDimSize
        A_ : Dict = imageDimSize
        super().__init__(**_a )


class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = MCLIPConfig

    def __init__( self : Optional[Any] ,_a : List[Any] ,*_a : str ,**_a : List[Any] ):
        '''simple docstring'''
        super().__init__(_a ,*_a ,**_a )
        A_ : Optional[Any] = XLMRobertaModel(_a )
        A_ : Tuple = torch.nn.Linear(
            in_features=config.transformerDimensions ,out_features=config.numDims )

    def _a ( self : int ,_a : int ,_a : Any ):
        '''simple docstring'''
        A_ : Any = self.transformer(input_ids=_a ,attention_mask=_a )[0]
        A_ : Any = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(_a ), embs
27
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __magic_name__ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""input_features""", """is_longer"""] def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,): '''simple docstring''' super().__init__( feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,) A_ : Tuple = top_db A_ : Tuple = truncation A_ : Optional[Any] = padding A_ : Optional[int] = fft_window_size A_ : Dict = (fft_window_size >> 1) + 1 A_ : Any = hop_length A_ : List[Any] = max_length_s A_ : Tuple = max_length_s * sampling_rate A_ : Tuple = sampling_rate A_ : Optional[int] = frequency_min A_ : Tuple = frequency_max A_ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,) A_ : Dict = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,) def _a ( self : int ): '''simple docstring''' A_ : int = copy.deepcopy(self.__dict__ ) A_ : Tuple = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ): '''simple docstring''' A_ : List[str] = spectrogram( _a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,) return log_mel_spectrogram.T def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A_ : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A_ : int = [0] # randomly choose index for each part A_ : List[str] = np.random.choice(ranges[0] ) A_ : int = np.random.choice(ranges[1] ) A_ : Optional[int] = np.random.choice(ranges[2] ) A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :] A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :] A_ : Dict = mel[idx_back : idx_back + chunk_frames, :] A_ : Optional[int] = torch.tensor(mel[None, None, :] ) A_ : Dict = torch.nn.functional.interpolate( _a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a ) A_ : str = mel_shrink[0][0].numpy() A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": A_ : Dict = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A_ : Tuple = 
len(_a ) - max_length A_ : Optional[int] = np.random.randint(0 ,overflow + 1 ) A_ : List[Any] = waveform[idx : idx + max_length] A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A_ : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 ) A_ : str = False else: A_ : str = self._random_mel_fusion(_a ,_a ,_a ) A_ : Optional[Any] = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented' ) else: A_ : Optional[int] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A_ : int = int(max_length / len(_a ) ) A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A_ : List[str] = int(max_length / len(_a ) ) A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) ) A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,): '''simple docstring''' A_ : List[str] = truncation if truncation is not None else self.truncation A_ : List[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) A_ : int = is_batched_numpy or ( isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_a ,np.ndarray ): A_ : str = np.asarray(_a ,dtype=np.floataa ) elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Any = [np.asarray(_a )] # convert to mel spectrogram, truncate and pad if needed. 
A_ : str = [ self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a ) for waveform in raw_speech ] A_ : int = [] A_ : Any = [] for mel, longer in padded_inputs: input_mel.append(_a ) is_longer.append(_a ) if truncation == "fusion" and sum(_a ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A_ : List[Any] = np.random.randint(0 ,len(_a ) ) A_ : List[str] = True if isinstance(input_mel[0] ,_a ): A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A_ : List[str] = [[longer] for longer in is_longer] A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} A_ : int = BatchFeature(_a ) if return_tensors is not None: A_ : int = input_features.convert_to_tensors(_a ) return input_features
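# Hedged usage sketch for the feature extractor above: it appears to mirror
# transformers' ClapFeatureExtractor, but that upstream name and the exact
# output shape are assumptions, since identifiers in this dump are obfuscated.
import numpy as np
from transformers import ClapFeatureExtractor

extractor = ClapFeatureExtractor()  # defaults: 48 kHz input, 10 s max length, truncation="fusion"
waveform = np.zeros(48_000 * 12, dtype=np.float32)  # a 12 s clip, longer than max_length_s
features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # should be (1, 4, n_frames, 64): four stacked mel chunks in fusion mode
print(features["is_longer"])  # should be [[True]], because the clip exceeds the 10 s maximum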
27
1
'''simple docstring''' import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( 'The `inpainting.py` script is outdated. Please use `from diffusers import' ' StableDiffusionInpaintPipeline` directly instead.' )
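# The replacement the deprecation warning above points to is a direct import:
from diffusers import StableDiffusionInpaintPipeline  # canonical import path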
27
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : str = batch_size A_ : int = seq_length A_ : Union[str, Any] = is_training A_ : Optional[Any] = use_token_type_ids A_ : int = use_labels A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : int = intermediate_size A_ : Tuple = hidden_act A_ : int = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : int = initializer_range A_ : Optional[Any] = num_labels A_ : str = num_choices A_ : Optional[Any] = scope A_ : List[Any] = self.vocab_size - 1 def _a ( self : Any ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : List[Any] = None if self.use_token_type_ids: A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : int = None A_ : str = None A_ : Union[str, Any] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Any = ids_tensor([self.batch_size] ,self.num_choices ) A_ : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = OpenAIGPTModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a ) A_ : str = model(_a ,token_type_ids=_a ) A_ : Dict = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ): '''simple docstring''' A_ : str = OpenAIGPTLMHeadModel(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ): '''simple docstring''' A_ : Any = OpenAIGPTDoubleHeadsModel(_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ): '''simple docstring''' A_ : List[str] = self.num_labels A_ : int = OpenAIGPTForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = config_and_inputs A_ : int = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) a_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly a_ = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ): '''simple docstring''' A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,) A_ : Any = inputs_dict["""labels"""] A_ : Any = inputs_dict["""labels"""] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,) A_ : int = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=_a ) return inputs_dict def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Tuple = OpenAIGPTModelTester(self ) A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_a ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a ) @slow def _a ( self : List[Any] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : List[str] ): '''simple docstring''' A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_a ) A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is A_ : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : int = model.generate(_a ,do_sample=_a ) self.assertListEqual(output_ids[0].tolist() ,_a )
27
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """FlavaImageProcessor""" a_ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Optional[int] ,_a : List[Any]=None ,_a : int=None ,**_a : Tuple ): '''simple docstring''' A_ : Dict = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a ,_a ) A_ : Optional[Any] = self.image_processor def __call__( self : Tuple ,_a : Optional[ImageInput] = None ,_a : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = False ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: A_ : Optional[int] = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) if images is not None: A_ : Dict = self.image_processor( _a ,return_image_mask=_a ,return_codebook_pixels=_a ,return_tensors=_a ,**_a ,) if text is not None and images is not None: encoding.update(_a ) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_a ) ,tensor_type=_a ) def _a ( self : Optional[int] ,*_a : Tuple ,**_a : int ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : Union[str, Any] ,*_a : List[Any] ,**_a : List[Any] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : Tuple ): '''simple docstring''' A_ : Any = self.tokenizer.model_input_names A_ : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a ( self : str ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,) return self.image_processor_class @property def _a ( self : Union[str, Any] ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. 
Use `image_processor` instead.""" ,_a ,) return self.image_processor
27
'''simple docstring''' import baseaa def lowerCamelCase ( lowerCamelCase : str): return baseaa.aaaencode(string.encode("""utf-8""")) def lowerCamelCase ( lowerCamelCase : bytes): return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""") if __name__ == "__main__": import doctest doctest.testmod()
27
1
'''simple docstring''' def lowerCamelCase ( lowerCamelCase : int): A_ : Optional[Any] = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def lowerCamelCase ( lowerCamelCase : int = 100): A_ : str = 1 A_ : int = 2 for i in range(2 , max_n + 1): A_ : Any = pre_numerator A_ : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1 A_ : str = cur_numerator A_ : str = e_cont * pre_numerator + temp return sum_digits(lowerCamelCase) if __name__ == "__main__": print(f"""{solution() = }""")
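# Worked check of the recurrence above (Project Euler 65: convergents of e's
# continued fraction). The standalone loop and names here are illustrative.
num_prev, num_cur = 1, 2  # seeds matching pre_numerator / cur_numerator above
for i in range(2, 11):
    term = 2 * i // 3 if i % 3 == 0 else 1  # continued-fraction term, as above
    num_prev, num_cur = num_cur, term * num_cur + num_prev
print(num_cur)  # 1457, the numerator of the 10th convergent of e
print(sum(int(d) for d in str(num_cur)))  # 17 = 1 + 4 + 5 + 7, as in the problem statement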
27
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowerCamelCase ( lowerCamelCase : Optional[Any]): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def lowerCamelCase ( lowerCamelCase : str): # word like '180' or '身高' or '神' for char in word: A_ : Optional[Any] = ord(lowerCamelCase) if not _is_chinese_char(lowerCamelCase): return 0 return 1 def lowerCamelCase ( lowerCamelCase : List[str]): A_ : Any = set() for token in tokens: A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase) if chinese_word: word_set.add(lowerCamelCase) A_ : Any = list(lowerCamelCase) return word_list def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()): if not chinese_word_set: return bert_tokens A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set]) A_ : str = bert_tokens A_ , A_ : Any = 0, len(lowerCamelCase) while start < end: A_ : Tuple = True if is_chinese(bert_word[start]): A_ : List[str] = min(end - start , lowerCamelCase) for i in range(lowerCamelCase , 1 , -1): A_ : Tuple = """""".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1 , start + i): A_ : Dict = """##""" + bert_word[j] A_ : str = start + i A_ : Dict = False break if single_word: start += 1 return bert_word def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer): A_ : Union[str, Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws A_ : int = [get_chinese_word(lowerCamelCase) for r in res] ltp_res.extend(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : List[Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512) bert_res.extend(res["""input_ids"""]) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Union[str, Any] = [] for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase): A_ : List[Any] = [] for id in input_ids: A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase) input_tokens.append(lowerCamelCase) A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase) A_ : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCamelCase): if token[:2] == "##": A_ : Optional[Any] = token[2:] # save chinese tokens' pos if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)): ref_id.append(lowerCamelCase) ref_ids.append(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) return ref_ids def lowerCamelCase ( lowerCamelCase : Tuple): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""") as f: A_ : Optional[int] = f.readlines() A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device A_ : Dict = BertTokenizer.from_pretrained(args.bert) A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase) with open(args.save_path , """w""" , encoding="""utf-8""") as f: A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids] f.writelines(lowerCamelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) __magic_name__ = parser.parse_args() main(args)
27
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : str ,_a : Dict ,): '''simple docstring''' A_ : Union[str, Any] = parent A_ : Union[str, Any] = 13 A_ : List[str] = 7 A_ : int = True A_ : Optional[int] = True A_ : Any = True A_ : Any = 99 A_ : Union[str, Any] = 32 A_ : Any = 2 A_ : Dict = 4 A_ : Tuple = 37 A_ : List[str] = """gelu""" A_ : Dict = 0.1 A_ : str = 0.1 A_ : int = 512 A_ : str = 16 A_ : Dict = 2 A_ : Dict = 0.02 A_ : Dict = 3 A_ : int = 4 A_ : List[str] = None def _a ( self : str ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : Union[str, Any] = None if self.use_input_mask: A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : List[Any] = None A_ : Optional[int] = None A_ : str = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) A_ : Any = EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : List[str] ): '''simple docstring''' ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = self.prepare_config_and_inputs() A_ : Optional[Any] = True A_ : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A_ : int = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _a ( self : Any ,_a : List[str] ,_a : str ,_a : Dict ,_a : Optional[Any] ,_a : Tuple ,_a : Union[str, Any] ): '''simple docstring''' A_ : str = TFEsmModel(config=_a ) A_ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} A_ : Tuple = model(_a ) A_ : Union[str, Any] = [input_ids, input_mask] A_ : Dict = model(_a ) A_ : List[str] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : List[Any] ,_a : str ,_a : int ,_a : List[str] ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : int ,): '''simple docstring''' A_ : Dict = True A_ : Tuple = TFEsmModel(config=_a ) A_ : Dict = { """input_ids""": input_ids, """attention_mask""": input_mask, 
"""encoder_hidden_states""": encoder_hidden_states, """encoder_attention_mask""": encoder_attention_mask, } A_ : Tuple = model(_a ) A_ : Union[str, Any] = [input_ids, input_mask] A_ : Tuple = model(_a ,encoder_hidden_states=_a ) # Also check the case where encoder outputs are not passed A_ : Union[str, Any] = model(_a ,attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : str ,_a : List[str] ,_a : List[Any] ,_a : Any ,_a : List[Any] ,_a : Dict ,_a : Any ): '''simple docstring''' A_ : Optional[Any] = TFEsmForMaskedLM(config=_a ) A_ : List[str] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Optional[int] ,_a : str ,_a : int ,_a : List[Any] ,_a : Union[str, Any] ,_a : Any ,_a : List[Any] ): '''simple docstring''' A_ : List[Any] = self.num_labels A_ : Optional[int] = TFEsmForTokenClassification(config=_a ) A_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask} A_ : List[str] = model(_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _a ( self : str ): '''simple docstring''' A_ : str = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Dict = config_and_inputs A_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) a_ = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) a_ = False a_ = False def _a ( self : List[Any] ): '''simple docstring''' A_ : Tuple = TFEsmModelTester(self ) A_ : Union[str, Any] = ConfigTester(self ,config_class=_a ,hidden_size=37 ) def _a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _a ( self : Any ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_a ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_a ) def _a ( self : Any ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_a ) @slow def _a ( self : List[Any] ): '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Union[str, Any] = TFEsmModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @unittest.skip("""Protein models do not support embedding resizing.""" ) def _a ( self : str ): '''simple docstring''' pass @unittest.skip("""Protein models do not support embedding resizing.""" ) def _a ( self : Optional[int] ): '''simple docstring''' pass def _a ( self : Union[str, Any] ): '''simple docstring''' A_ , A_ : List[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[Any] = model_class(_a ) assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A_ : Dict = model.get_bias() assert isinstance(_a ,_a ) for k, v in name.items(): assert isinstance(_a ,tf.Variable ) else: A_ : str = model.get_output_embeddings() assert x is None A_ : Any = model.get_bias() assert name is None @require_tf class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : Optional[int] ): '''simple docstring''' A_ : List[str] = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) A_ : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ : int = model(_a )[0] A_ : int = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) ,_a ) # compare the actual values for a slice. A_ : List[Any] = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) ) @slow def _a ( self : List[str] ): '''simple docstring''' A_ : Tuple = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) A_ : str = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A_ : Optional[int] = model(_a )[0] # compare the actual values for a slice. A_ : Union[str, Any] = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
27
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """ViltImageProcessor""" a_ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ): '''simple docstring''' A_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a ,_a ) A_ : Optional[Any] = self.image_processor def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,): '''simple docstring''' A_ : int = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) # add pixel_values + pixel_mask A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a ) encoding.update(_a ) return encoding def _a ( self : List[Any] ,*_a : Any ,**_a : Any ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : int ,*_a : int ,**_a : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = self.tokenizer.model_input_names A_ : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a ( self : str ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,) return self.image_processor_class @property def _a ( self : int ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,) return self.image_processor
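# Hedged usage sketch: assumes the processor above corresponds to transformers'
# ViltProcessor (identifiers in this dump are obfuscated); the checkpoint and
# image URL are common public examples, not taken from the source.
import requests
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, "How many cats are there?", return_tensors="pt")
# `inputs` merges tokenizer outputs (input_ids, attention_mask, ...) with the
# image processor's pixel_values + pixel_mask, exactly as __call__ above does.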
27
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __magic_name__ = None __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} __magic_name__ = { 'vocab_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json', 'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json', 'moussaKam/barthez-orangesum-title': ( 'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json' ), }, } __magic_name__ = { 'moussaKam/mbarthez': 1_024, 'moussaKam/barthez': 1_024, 'moussaKam/barthez-orangesum-title': 1_024, } __magic_name__ = '▁' class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = BarthezTokenizer def __init__( self : List[str] ,_a : Dict=None ,_a : Optional[int]=None ,_a : int="<s>" ,_a : Dict="</s>" ,_a : Union[str, Any]="</s>" ,_a : int="<s>" ,_a : int="<unk>" ,_a : int="<pad>" ,_a : Union[str, Any]="<mask>" ,**_a : int ,): '''simple docstring''' A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token super().__init__( _a ,tokenizer_file=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,**_a ,) A_ : Any = vocab_file A_ : Any = False if not self.vocab_file else True def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : int = [self.cls_token_id] A_ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Optional[Any] = [self.sep_token_id] A_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : Optional[Any] ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : Optional[Any] = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file ,_a ) return (out_vocab_file,)
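# Hedged sketch of the special-token layout implemented above, assuming the
# upstream name BarthezTokenizerFast and a public checkpoint:
from transformers import BarthezTokenizerFast

tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
single = tok.build_inputs_with_special_tokens([10, 11])          # [cls, 10, 11, sep]: <s> A </s>
pair = tok.build_inputs_with_special_tokens([10, 11], [20, 21])  # [cls, 10, 11, sep, sep, 20, 21, sep]: <s> A </s></s> B </s>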
27
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""torch""", """torchsde"""] def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ): '''simple docstring''' requires_backends(self ,["""torch""", """torchsde"""] ) @classmethod def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ): '''simple docstring''' requires_backends(cls ,["""torch""", """torchsde"""] ) @classmethod def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ): '''simple docstring''' requires_backends(cls ,["""torch""", """torchsde"""] )
27
1
'''simple docstring''' from math import ceil, sqrt def lowerCamelCase ( lowerCamelCase : int = 100_0000): A_ : Tuple = 0 for outer_width in range(3 , (limit // 4) + 2): if outer_width**2 > limit: A_ : List[str] = max(ceil(sqrt(outer_width**2 - limit)) , 1) else: A_ : Optional[int] = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f"""{solution() = }""")
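# Hedged brute-force cross-check for the lamina counter above: a lamina with
# outer side n and hole side m (same parity, m >= 1) uses n*n - m*m tiles, and
# Project Euler 173 states there are exactly 41 laminae with up to 100 tiles.
def count_laminae(limit: int) -> int:  # illustrative helper, not from the source
    count = 0
    for n in range(3, limit // 4 + 2):
        for m in range(n - 2, 0, -2):  # widest hole first, i.e. fewest tiles
            if n * n - m * m <= limit:
                count += 1
            else:
                break  # shrinking the hole only uses more tiles
    return count

print(count_laminae(100))  # 41, matching the problem statement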
27
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"): A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {} A_ : Optional[int] = padding_side return tokenizer( [line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , ) def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ): A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,): '''simple docstring''' super().__init__() A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" ) A_ : Any = Path(_a ).joinpath(type_path + """.target""" ) A_ : Dict = self.get_char_lens(self.src_file ) A_ : Optional[int] = max_source_length A_ : List[str] = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' A_ : List[Any] = tokenizer A_ : Optional[Any] = prefix if n_obs is not None: A_ : Any = self.src_lens[:n_obs] A_ : Optional[int] = src_lang A_ : Tuple = tgt_lang def __len__( self : Tuple ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : List[str] ,_a : Tuple ): '''simple docstring''' A_ : int = index + 1 # linecache starts at 1 A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" ) A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_a ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right A_ : List[str] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer ) A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" ) A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" ) A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze() A_ : Dict = target_inputs["""input_ids"""].squeeze() A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _a ( _a : int ): '''simple docstring''' return [len(_a ) for x in Path(_a ).open().readlines()] def _a ( self : 
Optional[int] ,_a : Dict ): '''simple docstring''' A_ : str = torch.stack([x["""input_ids"""] for x in batch] ) A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] ) A_ : Union[str, Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : str = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : List[str] = trim_batch(_a ,_a ) A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a ) A_ : List[str] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __magic_name__ = getLogger(__name__) def lowerCamelCase ( lowerCamelCase : List[List]): return list(itertools.chain.from_iterable(lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : str): A_ : Union[str, Any] = get_git_info() save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json""")) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]): with open(lowerCamelCase , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase) def lowerCamelCase ( lowerCamelCase : Any): with open(lowerCamelCase) as f: return json.load(lowerCamelCase) def lowerCamelCase ( ): A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase) A_ : Union[str, Any] = { """repo_id""": str(lowerCamelCase), """repo_sha""": str(repo.head.object.hexsha), """repo_branch""": str(repo.active_branch), """hostname""": str(socket.gethostname()), } return repo_infos def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable): return list(map(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]): with open(lowerCamelCase , """wb""") as f: return pickle.dump(lowerCamelCase , lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str]): def remove_articles(lowerCamelCase : Any): return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase) def white_space_fix(lowerCamelCase : List[Any]): return " ".join(text.split()) def remove_punc(lowerCamelCase : Union[str, Any]): A_ : Optional[int] = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(lowerCamelCase : List[str]): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase)))) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int): A_ : Tuple = normalize_answer(lowerCamelCase).split() A_ : Dict = normalize_answer(lowerCamelCase).split() A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase) A_ : Any = sum(common.values()) if num_same == 0: return 0 A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any): return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]): assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Any = 0 for hypo, pred in zip(lowerCamelCase , lowerCamelCase): em += exact_match_score(lowerCamelCase , lowerCamelCase) if len(lowerCamelCase) > 0: em /= len(lowerCamelCase) return {"em": em} def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return 
model_prefix.startswith("""rag""") def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]): A_ : Optional[Any] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead A_ : Tuple = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase): if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) continue A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p] setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) return hparams, config
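# Worked example for the token-level F1 defined above (the pre-obfuscation
# names normalize_answer / f1_score are assumed):
#   normalize_answer("The Quick, Brown fox!") -> "quick brown fox"
#     (lowercased, punctuation stripped, articles removed, whitespace squeezed)
#   f1_score("green apples", "apples are green"):
#     tokens {green, apples} vs {apples, are, green} share 2 tokens, so
#     precision = 2/2, recall = 2/3 and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8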
27
1
'''simple docstring''' def lowerCamelCase ( lowerCamelCase : List[Any]): # noqa: E741 A_ : Optional[int] = len(lowerCamelCase) A_ : Tuple = 0 A_ : Tuple = [0] * n A_ : Dict = [False] * n A_ : int = [False] * n def dfs(lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any]): if parent == root: out_edge_count += 1 A_ : Optional[int] = True A_ : Any = at for to in l[at]: if to == parent: pass elif not visited[to]: A_ : str = dfs(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) A_ : Any = min(low[at] , low[to]) # AP found via bridge if at < low[to]: A_ : int = True # AP found via cycle if at == low[to]: A_ : List[Any] = True else: A_ : Any = min(low[at] , lowerCamelCase) return out_edge_count for i in range(lowerCamelCase): if not visited[i]: A_ : Tuple = 0 A_ : int = dfs(lowerCamelCase , lowerCamelCase , -1 , lowerCamelCase) A_ : Union[str, Any] = out_edge_count > 1 for x in range(len(lowerCamelCase)): if is_art[x] is True: print(lowerCamelCase) # Adjacency list of graph __magic_name__ = { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], } compute_ap(data)
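# Expected result for the sample graph above (a hedged note): the articulation
# points are 2, 3 and 5. Removing 2 cuts off the {0, 1} triangle, removing 3
# isolates vertex 4, and removing 5 disconnects the 6-7-8 cycle, so
# compute_ap(data) should print 2, 3 and 5, one per line.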
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __magic_name__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
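# Hedged note: with the _LazyModule pattern above, importing the package is
# cheap; the real tokenizer modules load on first attribute access, e.g.
from transformers.models.nllb import NllbTokenizerFast  # triggers the lazy import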
27
1
'''simple docstring''' def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float): return price * (1 + tax_rate) if __name__ == "__main__": print(f"""{price_plus_tax(100, 0.2_5) = }""") print(f"""{price_plus_tax(1_2_5.5_0, 0.0_5) = }""")
27
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = KandinskyVaaControlnetPipeline a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a_ = False @property def _a ( self : Any ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return self.time_input_dim @property def _a ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def _a ( self : Optional[Any] ): '''simple docstring''' return 100 @property def _a ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) A_ : List[Any] = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } A_ : Tuple = UNetaDConditionModel(**_a ) return model @property def _a ( self : List[str] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _a ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) A_ : int = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : List[str] ): '''simple docstring''' A_ : Optional[Any] = self.dummy_unet A_ : int = self.dummy_movq A_ : Tuple = DDIMScheduler( num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,) A_ : int = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ): '''simple docstring''' A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) 
,rng=random.Random(_a ) ).to(_a ) A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( _a ) # create hint A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith("""mps""" ): A_ : Optional[Any] = torch.manual_seed(_a ) else: A_ : str = torch.Generator(device=_a ).manual_seed(_a ) A_ : List[Any] = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def _a ( self : Dict ): '''simple docstring''' A_ : List[Any] = """cpu""" A_ : List[str] = self.get_dummy_components() A_ : Tuple = self.pipeline_class(**_a ) A_ : Dict = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) ) A_ : Tuple = output.images A_ : Optional[Any] = pipe( **self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0] A_ : Tuple = image[0, -3:, -3:, -1] A_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : List[Any] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Any ): '''simple docstring''' A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) A_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0 A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(_a ) A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) A_ : Union[str, Any] = pipeline.to(_a ) pipeline.set_progress_bar_config(disable=_a ) A_ : Optional[Any] = """A robot, 4k photo""" A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ , A_ : List[str] = pipe_prior( _a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ : List[Any] = pipeline( image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,) A_ : Dict = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_a ,_a )
27
1
'''simple docstring''' from math import asin, atan, cos, radians, sin, sqrt, tan __magic_name__ = 6_3_7_8_1_3_7.0 __magic_name__ = 6_3_5_6_7_5_2.3_1_4_2_4_5 __magic_name__ = 6_378_137 def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float): A_ : List[Any] = (AXIS_A - AXIS_B) / AXIS_A A_ : int = atan((1 - flattening) * tan(radians(lowerCamelCase))) A_ : List[str] = atan((1 - flattening) * tan(radians(lowerCamelCase))) A_ : int = radians(lowerCamelCase) A_ : List[Any] = radians(lowerCamelCase) # Equation A_ : Any = sin((phi_a - phi_a) / 2) A_ : int = sin((lambda_a - lambda_a) / 2) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda A_ : List[str] = sqrt(sin_sq_phi + (cos(lowerCamelCase) * cos(lowerCamelCase) * sin_sq_lambda)) return 2 * RADIUS * asin(lowerCamelCase) if __name__ == "__main__": import doctest doctest.testmod()
27
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """deberta-v2""" def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,): '''simple docstring''' super().__init__(**_a ) A_ : Union[str, Any] = hidden_size A_ : Dict = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : List[Any] = intermediate_size A_ : List[Any] = hidden_act A_ : Optional[int] = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : int = max_position_embeddings A_ : Any = type_vocab_size A_ : List[Any] = initializer_range A_ : int = relative_attention A_ : Tuple = max_relative_positions A_ : int = pad_token_id A_ : Tuple = position_biased_input # Backwards compatibility if type(_a ) == str: A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )] A_ : Any = pos_att_type A_ : Optional[int] = vocab_size A_ : Tuple = layer_norm_eps A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a ) A_ : Union[str, Any] = pooler_dropout A_ : List[Any] = pooler_hidden_act class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : Any ): '''simple docstring''' if self.task == "multiple-choice": A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A_ : Any = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def _a ( self : Optional[int] ): '''simple docstring''' return 12 def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,): '''simple docstring''' A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
27
1
'''simple docstring''' import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __magic_name__ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""input_values""", """attention_mask"""] def __init__( self : int ,_a : int = 1 ,_a : int = 16000 ,_a : float = 0.0 ,_a : bool = False ,_a : int = 80 ,_a : int = 16 ,_a : int = 64 ,_a : str = "hann_window" ,_a : float = 1.0 ,_a : float = 80 ,_a : float = 7600 ,_a : float = 1e-10 ,_a : int = 2 ,_a : bool = True ,**_a : str ,): '''simple docstring''' super().__init__(feature_size=_a ,sampling_rate=_a ,padding_value=_a ,**_a ) A_ : Any = do_normalize A_ : Union[str, Any] = return_attention_mask A_ : Optional[Any] = num_mel_bins A_ : str = hop_length A_ : Dict = win_length A_ : Dict = win_function A_ : List[Any] = frame_signal_scale A_ : List[str] = fmin A_ : List[str] = fmax A_ : Any = mel_floor A_ : Optional[Any] = reduction_factor A_ : List[Any] = win_length * sampling_rate // 1000 A_ : List[Any] = hop_length * sampling_rate // 1000 A_ : List[str] = optimal_fft_length(self.sample_size ) A_ : Optional[int] = (self.n_fft // 2) + 1 A_ : Dict = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=_a ) A_ : Tuple = mel_filter_bank( num_frequency_bins=self.n_freqs ,num_mel_filters=self.num_mel_bins ,min_frequency=self.fmin ,max_frequency=self.fmax ,sampling_rate=self.sampling_rate ,norm="""slaney""" ,mel_scale="""slaney""" ,) if frame_signal_scale != 1.0: warnings.warn( """The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers""" ,_a ,) if reduction_factor != 2.0: warnings.warn( """The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers""" ,_a ,) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _a ( _a : List[np.ndarray] ,_a : List[np.ndarray] ,_a : float = 0.0 ): '''simple docstring''' if attention_mask is not None: A_ : Optional[Any] = np.array(_a ,np.intaa ) A_ : List[str] = [] for vector, length in zip(_a ,attention_mask.sum(-1 ) ): A_ : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: A_ : Union[str, Any] = padding_value normed_input_values.append(_a ) else: A_ : Optional[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def _a ( self : List[Any] ,_a : np.ndarray ,): '''simple docstring''' A_ : Tuple = spectrogram( _a ,window=self.window ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,mel_filters=self.mel_filters ,mel_floor=self.mel_floor ,log_mel="""log10""" ,) return log_mel_spec.T def __call__( self : Optional[Any] ,_a : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None ,_a : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Optional[int] = None ,_a : bool = False ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[int] = None ,**_a 
: int ,): '''simple docstring''' if audio is None and audio_target is None: raise ValueError("""You must provide either `audio` or `audio_target` values.""" ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if audio is not None: A_ : Optional[int] = self._process_audio( _a ,_a ,_a ,_a ,_a ,_a ,_a ,_a ,**_a ,) else: A_ : Union[str, Any] = None if audio_target is not None: A_ : str = self._process_audio( _a ,_a ,_a ,_a ,_a ,_a ,_a ,_a ,**_a ,) if inputs is None: return inputs_target else: A_ : Optional[int] = inputs_target["""input_values"""] A_ : Tuple = inputs_target.get("""attention_mask""" ) if decoder_attention_mask is not None: A_ : Optional[int] = decoder_attention_mask return inputs def _a ( self : Union[str, Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : bool = False ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Optional[int] = None ,_a : bool = False ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,): '''simple docstring''' A_ : Optional[Any] = isinstance(_a ,np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) A_ : Dict = is_batched_numpy or ( isinstance(_a ,(list, tuple) ) and (isinstance(speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: A_ : Optional[Any] = [np.asarray(_a ,dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_a ,np.ndarray ): A_ : Union[str, Any] = np.asarray(_a ,dtype=np.floataa ) elif isinstance(_a ,np.ndarray ) and speech.dtype is np.dtype(np.floataa ): A_ : Optional[int] = speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Optional[Any] = [speech] # needed to make pad() work on spectrogram inputs A_ : Dict = self.feature_size # convert into correct format for padding if is_target: A_ : Dict = [self._extract_mel_features(_a ) for waveform in speech] A_ : Optional[int] = BatchFeature({"""input_values""": features} ) A_ : Optional[int] = self.num_mel_bins else: A_ : List[str] = BatchFeature({"""input_values""": speech} ) A_ : int = self.pad( _a ,padding=_a ,max_length=_a ,truncation=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,**_a ,) A_ : str = feature_size_hack # convert input values to correct format A_ : List[Any] = padded_inputs["""input_values"""] if not isinstance(input_values[0] ,np.ndarray ): A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for array in input_values] elif ( not isinstance(_a ,np.ndarray ) and isinstance(input_values[0] ,np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): A_ : Union[str, Any] = [array.astype(np.floataa ) for array in input_values] elif isinstance(_a ,np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): A_ : int = input_values.astype(np.floataa ) # convert attention_mask to correct format A_ : List[Any] = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: A_ : Any = [np.asarray(_a ,dtype=np.intaa ) for array in 
attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: A_ : Tuple = ( attention_mask if self._get_padding_strategies(_a ,max_length=_a ) is not PaddingStrategy.DO_NOT_PAD else None ) A_ : List[str] = self.zero_mean_unit_var_norm( padded_inputs["""input_values"""] ,attention_mask=_a ,padding_value=self.padding_value ) if return_tensors is not None: A_ : int = padded_inputs.convert_to_tensors(_a ) return padded_inputs def _a ( self : Any ): '''simple docstring''' A_ : Dict = super().to_dict() # Don't serialize these as they are derived from the other properties. A_ : List[Any] = ["""window""", """mel_filters""", """sample_size""", """sample_stride""", """n_fft""", """n_freqs"""] for name in names: if name in output: del output[name] return output
27
'''simple docstring'''
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    # The standard HTTP header name is 'User-Agent'.
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
27
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available

_import_structure = {'tokenization_herbert': ['HerbertTokenizer']}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_herbert_fast'] = ['HerbertTokenizerFast']

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
'''simple docstring'''
from ...configuration_utils import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    '''simple docstring'''

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
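A hedged usage sketch for the config class above (only meaningful inside a transformers source tree, since the import is relative; the asserted values are simply the defaults declared in the signature):

config = NezhaConfig(num_hidden_layers=6)
assert config.hidden_size == 768 and config.num_hidden_layers == 6
assert config.model_type == 'nezha'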
27
1
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) set_seed(770) __magic_name__ = { 'c_attn': 'att_proj', 'c_proj': 'out_proj', 'c_fc': 'in_proj', 'transformer.': '', 'h.': 'layers.', 'ln_1': 'layernorm_1', 'ln_2': 'layernorm_2', 'ln_f': 'layernorm_final', 'wpe': 'position_embeds_layer', 'wte': 'input_embeds_layer', } __magic_name__ = { 'text_small': { 'repo_id': 'suno/bark', 'file_name': 'text.pt', }, 'coarse_small': { 'repo_id': 'suno/bark', 'file_name': 'coarse.pt', }, 'fine_small': { 'repo_id': 'suno/bark', 'file_name': 'fine.pt', }, 'text': { 'repo_id': 'suno/bark', 'file_name': 'text_2.pt', }, 'coarse': { 'repo_id': 'suno/bark', 'file_name': 'coarse_2.pt', }, 'fine': { 'repo_id': 'suno/bark', 'file_name': 'fine_2.pt', }, } __magic_name__ = os.path.dirname(os.path.abspath(__file__)) __magic_name__ = os.path.join(os.path.expanduser('~'), '.cache') __magic_name__ = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0') def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : str=False): A_ : Union[str, Any] = model_type if use_small: key += "_small" return os.path.join(lowerCamelCase , REMOTE_MODEL_PATHS[key]["""file_name"""]) def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple): os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase) hf_hub_download(repo_id=lowerCamelCase , filename=lowerCamelCase , local_dir=lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple=False , lowerCamelCase : Tuple="text"): if model_type == "text": A_ : Dict = BarkSemanticModel A_ : Dict = BarkSemanticConfig A_ : List[Any] = BarkSemanticGenerationConfig elif model_type == "coarse": A_ : str = BarkCoarseModel A_ : Any = BarkCoarseConfig A_ : Union[str, Any] = BarkCoarseGenerationConfig elif model_type == "fine": A_ : int = BarkFineModel A_ : Optional[Any] = BarkFineConfig A_ : List[str] = BarkFineGenerationConfig else: raise NotImplementedError() A_ : Any = F'{model_type}_small' if use_small else model_type A_ : Optional[Any] = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(lowerCamelCase): logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.') _download(model_info["""repo_id"""] , model_info["""file_name"""]) A_ : int = torch.load(lowerCamelCase , map_location=lowerCamelCase) # this is a hack A_ : Any = checkpoint["""model_args"""] if "input_vocab_size" not in model_args: A_ : List[str] = model_args["""vocab_size"""] A_ : List[Any] = model_args["""vocab_size"""] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments A_ : str = model_args.pop("""n_head""") A_ : List[str] = model_args.pop("""n_embd""") A_ : List[str] = model_args.pop("""n_layer""") A_ : Optional[int] = ConfigClass(**checkpoint["""model_args"""]) A_ : List[str] = 
ModelClass(config=lowerCamelCase) A_ : int = GenerationConfigClass() A_ : List[Any] = model_generation_config A_ : Any = checkpoint["""model"""] # fixup checkpoint A_ : List[str] = """_orig_mod.""" for k, v in list(state_dict.items()): if k.startswith(lowerCamelCase): # replace part of the key with corresponding layer name in HF implementation A_ : Union[str, Any] = k[len(lowerCamelCase) :] for old_layer_name in new_layer_name_dict: A_ : List[str] = new_k.replace(lowerCamelCase , new_layer_name_dict[old_layer_name]) A_ : Union[str, Any] = state_dict.pop(lowerCamelCase) A_ : Any = set(state_dict.keys()) - set(model.state_dict().keys()) A_ : Dict = {k for k in extra_keys if not k.endswith(""".attn.bias""")} A_ : str = set(model.state_dict().keys()) - set(state_dict.keys()) A_ : Optional[Any] = {k for k in missing_keys if not k.endswith(""".attn.bias""")} if len(lowerCamelCase) != 0: raise ValueError(F'extra keys found: {extra_keys}') if len(lowerCamelCase) != 0: raise ValueError(F'missing keys: {missing_keys}') model.load_state_dict(lowerCamelCase , strict=lowerCamelCase) A_ : int = model.num_parameters(exclude_embeddings=lowerCamelCase) A_ : Optional[Any] = checkpoint["""best_val_loss"""].item() logger.info(F'model loaded: {round(n_params/1E6 , 1)}M params, {round(lowerCamelCase , 3)} loss') model.eval() model.to(lowerCamelCase) del checkpoint, state_dict return model def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Dict=False , lowerCamelCase : str="text"): if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() A_ : Tuple = """cpu""" # do conversion on cpu A_ : str = _get_ckpt_path(lowerCamelCase , use_small=lowerCamelCase) A_ : Optional[Any] = _load_model(lowerCamelCase , lowerCamelCase , model_type=lowerCamelCase , use_small=lowerCamelCase) # load bark initial model A_ : Optional[Any] = _bark_load_model(lowerCamelCase , """cpu""" , model_type=lowerCamelCase , use_small=lowerCamelCase) if model_type == "text": A_ : Optional[int] = bark_model["""model"""] if model.num_parameters(exclude_embeddings=lowerCamelCase) != bark_model.get_num_params(): raise ValueError("""initial and new models don't have the same number of parameters""") # check if same output as the bark model A_ : Dict = 5 A_ : Dict = 10 if model_type in ["text", "coarse"]: A_ : Optional[int] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int) A_ : Optional[int] = bark_model(lowerCamelCase)[0] A_ : str = model(lowerCamelCase) # take last logits A_ : List[str] = output_new_model_total.logits[:, [-1], :] else: A_ : Tuple = 3 A_ : Tuple = 8 A_ : Tuple = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int) A_ : int = model(lowerCamelCase , lowerCamelCase) A_ : Any = bark_model(lowerCamelCase , lowerCamelCase) A_ : Dict = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("""initial and new outputs don't have the same shape""") if (output_new_model - output_old_model).abs().max().item() > 1E-3: raise ValueError("""initial and new outputs are not equal""") Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Dict , ): A_ : List[str] = os.path.join(lowerCamelCase , lowerCamelCase) A_ : Optional[int] = 
BarkSemanticConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""")) A_ : int = BarkCoarseConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""")) A_ : Any = BarkFineConfig.from_pretrained(os.path.join(lowerCamelCase , """config.json""")) A_ : Optional[Any] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""") A_ : Any = BarkSemanticModel.from_pretrained(lowerCamelCase) A_ : Optional[Any] = BarkCoarseModel.from_pretrained(lowerCamelCase) A_ : Dict = BarkFineModel.from_pretrained(lowerCamelCase) A_ : str = EncodecModel.from_pretrained("""facebook/encodec_24khz""") A_ : Any = BarkConfig.from_sub_model_configs( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) A_ : List[Any] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config) A_ : str = BarkModel(lowerCamelCase) A_ : int = semantic A_ : Tuple = coarseAcoustic A_ : Any = fineAcoustic A_ : Tuple = codec A_ : List[Any] = bark_generation_config Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) bark.save_pretrained(lowerCamelCase , repo_id=lowerCamelCase , push_to_hub=lowerCamelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument('model_type', type=str, help='text, coarse or fine.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.') __magic_name__ = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
27
'''simple docstring'''
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
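A small usage sketch for depth_first_search: since the example graph G above is connected, a search from 'A' should reach every vertex (the assertion is added here for illustration and is not part of the original file):

# Every vertex is reachable from 'A' in the connected example graph G.
reached = depth_first_search(G, 'A')
assert reached == {'A', 'B', 'C', 'D', 'E', 'F', 'G'}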
27
1
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ['note_seq']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['note_seq'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['note_seq'])
27
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) def lowerCamelCase ( lowerCamelCase : Dict): A_ : List[str] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: A_ : Union[str, Any] = [144, 192, 240] A_ : int = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: A_ : List[str] = [96, 120, 144] A_ : Any = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: A_ : Any = [64, 80, 96] A_ : List[str] = [16, 16, 24, 48, 64, 80, 320] A_ : Any = 0.05 A_ : List[Any] = 2.0 if mobilevit_name.startswith("""deeplabv3_"""): A_ : int = 512 A_ : Optional[int] = 16 A_ : List[Any] = 21 A_ : List[str] = """pascal-voc-id2label.json""" else: A_ : str = 1000 A_ : Any = """imagenet-1k-id2label.json""" A_ : Any = """huggingface/label-files""" A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r""")) A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()} A_ : Any = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False): for i in range(1 , 6): if F'layer_{i}.' in name: A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.') if "conv_1." in name: A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""") if ".block." in name: A_ : Optional[Any] = name.replace(""".block.""" , """.""") if "exp_1x1" in name: A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""") if "red_1x1" in name: A_ : int = name.replace("""red_1x1""" , """reduce_1x1""") if ".local_rep.conv_3x3." in name: A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""") if ".local_rep.conv_1x1." in name: A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""") if ".norm." in name: A_ : Tuple = name.replace(""".norm.""" , """.normalization.""") if ".conv." in name: A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""") if ".conv_proj." in name: A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""") for i in range(0 , 2): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.') for i in range(2 , 6): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.') if "expand_1x1" in name: A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""") if "conv_3x3" in name: A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""") if "reduce_1x1" in name: A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""") for i in range(2 , 5): if F'.global_rep.{i}.weight' in name: A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""") if F'.global_rep.{i}.bias' in name: A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""") if ".global_rep." in name: A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""") if ".pre_norm_mha.0." 
in name: A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""") if ".pre_norm_mha.1.out_proj." in name: A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""") if ".pre_norm_ffn.0." in name: A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""") if ".pre_norm_ffn.1." in name: A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""") if ".pre_norm_ffn.4." in name: A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""") if ".transformer." in name: A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""") if ".aspp_layer." in name: A_ : int = name.replace(""".aspp_layer.""" , """.""") if ".aspp_pool." in name: A_ : Tuple = name.replace(""".aspp_pool.""" , """.""") if "seg_head." in name: A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""") if "segmentation_head.classifier.classifier." in name: A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""") if "classifier.fc." in name: A_ : str = name.replace("""classifier.fc.""" , """classifier.""") elif (not base_model) and ("segmentation_head." not in name): A_ : str = """mobilevit.""" + name return name def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False): if base_model: A_ : Dict = """""" else: A_ : Any = """mobilevit.""" for key in orig_state_dict.copy().keys(): A_ : List[Any] = orig_state_dict.pop(lowerCamelCase) if key[:8] == "encoder.": A_ : int = key[8:] if "qkv" in key: A_ : Any = key.split(""".""") A_ : str = int(key_split[0][6:]) - 1 A_ : int = int(key_split[3]) A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}') A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size A_ : Optional[Any] = ( F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' 
) if "weight" in key: A_ : Dict = val[:dim, :] A_ : Optional[int] = val[dim : dim * 2, :] A_ : List[Any] = val[-dim:, :] else: A_ : Optional[Any] = val[:dim] A_ : List[Any] = val[dim : dim * 2] A_ : Any = val[-dim:] else: A_ : List[str] = val return orig_state_dict def lowerCamelCase ( ): A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw) return im @torch.no_grad() def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False): A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase) # load original state_dict A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""") # load 🤗 model if mobilevit_name.startswith("""deeplabv3_"""): A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval() else: A_ : str = MobileViTForImageClassification(lowerCamelCase).eval() A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase) model.load_state_dict(lowerCamelCase) # Check outputs on an image, prepared by MobileViTImageProcessor A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32) A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""") A_ : List[Any] = model(**lowerCamelCase) A_ : Dict = outputs.logits if mobilevit_name.startswith("""deeplabv3_"""): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": A_ : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xs": A_ : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xxs": A_ : Tuple = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241]) elif mobilevit_name == "mobilevit_xs": A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587]) elif mobilevit_name == "mobilevit_xxs": A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4) Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase) print(F'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase) if push_to_hub: A_ : str = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", 
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""") A_ : Union[str, Any] = model_mapping[mobilevit_name] image_processor.push_to_hub(lowerCamelCase , organization="""apple""") model.push_to_hub(lowerCamelCase , organization="""apple""") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __magic_name__ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
27
1
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __magic_name__ = random.Random() def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : List[str]=1.0 , lowerCamelCase : Optional[int]=None , lowerCamelCase : List[str]=None): if rng is None: A_ : Dict = global_rng A_ : List[str] = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[Any] ,_a : Optional[Any] ,_a : str=7 ,_a : int=400 ,_a : List[str]=2000 ,_a : Any=2048 ,_a : Any=128 ,_a : int=1 ,_a : Optional[int]=512 ,_a : Union[str, Any]=30 ,_a : Any=44100 ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : str = batch_size A_ : str = min_seq_length A_ : Union[str, Any] = max_seq_length A_ : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) A_ : Union[str, Any] = spectrogram_length A_ : Optional[int] = feature_size A_ : Tuple = num_audio_channels A_ : List[str] = hop_length A_ : Union[str, Any] = chunk_length A_ : Tuple = sampling_rate def _a ( self : Dict ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def _a ( self : int ,_a : List[Any]=False ,_a : List[str]=False ): '''simple docstring''' def _flatten(_a : int ): return list(itertools.chain(*_a ) ) if equal_length: A_ : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size A_ : str = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: A_ : Tuple = [np.asarray(_a ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = TvltFeatureExtractor def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = TvltFeatureExtractionTester(self ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_a ,"""spectrogram_length""" ) ) self.assertTrue(hasattr(_a ,"""feature_size""" ) ) self.assertTrue(hasattr(_a ,"""num_audio_channels""" ) ) self.assertTrue(hasattr(_a ,"""hop_length""" ) ) self.assertTrue(hasattr(_a ,"""chunk_length""" ) ) self.assertTrue(hasattr(_a ,"""sampling_rate""" ) ) def _a ( self : str ): '''simple docstring''' A_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A_ : List[Any] = feat_extract_first.save_pretrained(_a )[0] check_json_file_has_correct_format(_a ) A_ : Any = self.feature_extraction_class.from_pretrained(_a ) A_ : Any = feat_extract_first.to_dict() A_ : List[str] = 
feat_extract_second.to_dict() A_ : Union[str, Any] = dict_first.pop("""mel_filters""" ) A_ : str = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(_a ,_a ) ) self.assertEqual(_a ,_a ) def _a ( self : int ): '''simple docstring''' A_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A_ : Optional[Any] = os.path.join(_a ,"""feat_extract.json""" ) feat_extract_first.to_json_file(_a ) A_ : Optional[Any] = self.feature_extraction_class.from_json_file(_a ) A_ : Optional[int] = feat_extract_first.to_dict() A_ : int = feat_extract_second.to_dict() A_ : Optional[Any] = dict_first.pop("""mel_filters""" ) A_ : List[str] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(_a ,_a ) ) self.assertEqual(_a ,_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ : Any = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 A_ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )] A_ : Optional[Any] = [np.asarray(_a ) for speech_input in speech_inputs] # Test not batched input A_ : Union[str, Any] = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched A_ : Union[str, Any] = feature_extractor(_a ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking A_ : str = feature_extractor( _a ,return_tensors="""np""" ,sampling_rate=44100 ,mask_audio=_a ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
A_ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)] A_ : str = np.asarray(_a ) A_ : Tuple = feature_extractor(_a ,return_tensors="""np""" ,sampling_rate=44100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def _a ( self : Tuple ,_a : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech A_ : Any = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self._load_datasamples(1 ) A_ : Dict = TvltFeatureExtractor() A_ : int = feature_extractor(_a ,return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape ,(1, 1, 192, 128) ) A_ : Any = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,_a ,atol=1e-4 ) )
27
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __magic_name__ = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""pixel_values"""] def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,): '''simple docstring''' super().__init__(**_a ) A_ : Tuple = size if size is not None else {"""shortest_edge""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" ) A_ : Any = do_resize A_ : List[str] = size A_ : Union[str, Any] = resample A_ : Dict = do_center_crop A_ : List[str] = crop_size A_ : Any = do_rescale A_ : Union[str, Any] = rescale_factor A_ : Any = do_normalize A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD A_ : Tuple = do_convert_rgb def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,): '''simple docstring''' A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a ) return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a ) def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,): '''simple docstring''' A_ : Optional[int] = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,): '''simple docstring''' return rescale(_a ,scale=_a ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,): '''simple docstring''' return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a ) def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,): '''simple docstring''' A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize A_ : Tuple = size if size is not None else self.size A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a ) A_ : List[str] = resample if resample is not None else self.resample A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Any = crop_size if crop_size is not None else self.crop_size A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a ) A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : Any = do_normalize if do_normalize is not None else self.do_normalize A_ : int = image_mean if image_mean is not None else self.image_mean A_ : int = image_std if image_std is not None else self.image_std A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb A_ : int = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: A_ : Optional[int] = [convert_to_rgb(_a ) for image in images] # All transformations expect numpy arrays. A_ : Dict = [to_numpy_array(_a ) for image in images] if do_resize: A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images] if do_center_crop: A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images] if do_rescale: A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images] if do_normalize: A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images] A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images] A_ : List[str] = {"""pixel_values""": images} return BatchFeature(data=_a ,tensor_type=_a )
27
1
'''simple docstring'''
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset

POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]['label']]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]['img'])).convert('RGB')
        image = self.transforms(image)

        return {
            'image_start_token': start_token,
            'image_end_token': end_token,
            'sentence': sentence,
            'image': image,
            'label': label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row['label'])
        return label_freqs


def collate_fn(batch):
    lens = [len(row['sentence']) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row['sentence']
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row['image'] for row in batch])
    tgt_tensor = torch.stack([row['label'] for row in batch])
    img_start_token = torch.stack([row['image_start_token'] for row in batch])
    img_end_token = torch.stack([row['image_end_token'] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        'Crime', 'Drama', 'Thriller', 'Action', 'Comedy', 'Romance', 'Documentary', 'Short', 'Mystery',
        'History', 'Family', 'Adventure', 'Fantasy', 'Sci-Fi', 'Western', 'Horror', 'Sport', 'War',
        'Music', 'Musical', 'Animation', 'Biography', 'Film-Noir',
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
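A hedged sketch of how collate_fn pads a batch, using two fake rows with stand-in shapes (the tensor values and the 23-class label size are illustrative assumptions; real rows come from JsonlDataset):

import torch

fake_row = lambda tokens: {
    'sentence': torch.tensor(tokens),          # variable-length token ids
    'image': torch.zeros(3, 224, 224),         # stand-in image tensor
    'label': torch.zeros(23),                  # 23 = len(get_mmimdb_labels())
    'image_start_token': torch.tensor(101),
    'image_end_token': torch.tensor(102),
}
batch = [fake_row([101, 2054, 102]), fake_row([101, 102])]

text, mask, img, img_start, img_end, tgt = collate_fn(batch)
# Sentences are right-padded to the longest length in the batch.
assert text.shape == (2, 3)
assert mask.tolist() == [[1, 1, 1], [1, 1, 0]]
assert img.shape == (2, 3, 224, 224) and tgt.shape == (2, 23)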
27
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor

logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
27
1
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """char""" a_ = """bpe""" a_ = """wp""" __magic_name__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """char_tokenizer"""] a_ = """ViTImageProcessor""" a_ = """MgpstrTokenizer""" def __init__( self : Dict ,_a : Optional[int]=None ,_a : Dict=None ,**_a : Optional[int] ): '''simple docstring''' A_ : str = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : Optional[int] = kwargs.pop("""feature_extractor""" ) A_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) A_ : Union[str, Any] = tokenizer A_ : List[Any] = AutoTokenizer.from_pretrained("""gpt2""" ) A_ : str = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(_a ,_a ) def __call__( self : Optional[int] ,_a : str=None ,_a : int=None ,_a : int=None ,**_a : Any ): '''simple docstring''' if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: A_ : int = self.image_processor(_a ,return_tensors=_a ,**_a ) if text is not None: A_ : Tuple = self.char_tokenizer(_a ,return_tensors=_a ,**_a ) if text is None: return inputs elif images is None: return encodings else: A_ : int = encodings["""input_ids"""] return inputs def _a ( self : Optional[Any] ,_a : Any ): '''simple docstring''' A_ , A_ , A_ : List[str] = sequences A_ : List[str] = char_preds.size(0 ) A_ , A_ : Dict = self._decode_helper(_a ,"""char""" ) A_ , A_ : List[str] = self._decode_helper(_a ,"""bpe""" ) A_ , A_ : Union[str, Any] = self._decode_helper(_a ,"""wp""" ) A_ : str = [] A_ : Any = [] for i in range(_a ): A_ : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]] A_ : List[Any] = [char_strs[i], bpe_strs[i], wp_strs[i]] A_ : Any = scores.index(max(_a ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) A_ : Optional[int] = {} A_ : Dict = final_strs A_ : Dict = final_scores A_ : List[Any] = char_strs A_ : Optional[Any] = bpe_strs A_ : Optional[int] = wp_strs return out def _a ( self : List[str] ,_a : Dict ,_a : str ): '''simple docstring''' if format == DecodeType.CHARACTER: A_ : Dict = self.char_decode A_ : int = 1 A_ : List[str] = """[s]""" elif format == DecodeType.BPE: A_ : Optional[int] = self.bpe_decode A_ : List[Any] = 2 A_ : Tuple = """#""" elif format == DecodeType.WORDPIECE: A_ : int = self.wp_decode A_ : Optional[Any] = 102 A_ : List[str] = """[SEP]""" else: raise ValueError(f'Format {format} is not supported.' 
) A_ , A_ : Any = [], [] A_ : Tuple = pred_logits.size(0 ) A_ : Union[str, Any] = pred_logits.size(1 ) A_ , A_ : List[str] = pred_logits.topk(1 ,dim=-1 ,largest=_a ,sorted=_a ) A_ : Optional[int] = preds_index.view(-1 ,_a )[:, 1:] A_ : Dict = decoder(_a ) A_ , A_ : Tuple = torch.nn.functional.softmax(_a ,dim=2 ).max(dim=2 ) A_ : str = preds_max_prob[:, 1:] for index in range(_a ): A_ : List[str] = preds_str[index].find(_a ) A_ : int = preds_str[index][:pred_eos] A_ : Union[str, Any] = preds_index[index].cpu().tolist() A_ : Union[str, Any] = pred_index.index(_a ) if eos_token in pred_index else -1 A_ : Any = preds_max_prob[index][: pred_eos_index + 1] A_ : List[str] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_a ) conf_scores.append(_a ) return dec_strs, conf_scores def _a ( self : Optional[Any] ,_a : Any ): '''simple docstring''' A_ : str = [seq.replace(""" """ ,"""""" ) for seq in self.char_tokenizer.batch_decode(_a )] return decode_strs def _a ( self : Union[str, Any] ,_a : Optional[Any] ): '''simple docstring''' return self.bpe_tokenizer.batch_decode(_a ) def _a ( self : int ,_a : Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = [seq.replace(""" """ ,"""""" ) for seq in self.wp_tokenizer.batch_decode(_a )] return decode_strs
27
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = 'x',
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
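The multiplicity argument is not exercised by the demo prints above; here is a hedged sketch of its effect on a repeated root (the test function, starting point, and tolerance are illustrative choices of this edit, not from the source):

# sin(x)**2 has a double root at x = 0. With multiplicity=2 the modified
# step x - 2*f(x)/f'(x) reduces to x - tan(x), which converges rapidly,
# whereas the plain step (multiplicity=1) only crawls toward the root.
double_root = newton_raphson('sin(x)**2', 0.5, multiplicity=2)
assert abs(double_root) < 1e-4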
27
1
'''simple docstring'''
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x <= (b - h):  # `<=` so the last interior point b - h is not skipped
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f'y = {y}')


if __name__ == "__main__":
    main()
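A worked check of the rule above. The exact value 1/3 is ordinary calculus for the sample integrand f(x) = x**2 on [0, 1]; the tolerance below is an assumed illustrative bound derived from the trapezoidal rule's O(h^2) error term:

# With h = 0.1 the composite trapezoidal estimate of the integral of x**2
# over [0, 1] is 0.335, off by roughly h**2 * (f'(1) - f'(0)) / 12 ~ 0.0017.
estimate = method_1([0.0, 1.0], 10.0)
assert abs(estimate - 1 / 3) < 2e-3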
27
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset __magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Dict ,_a : Dict ): '''simple docstring''' super().__init__() A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a ) A_ : int = list(model.children() )[:-2] A_ : int = nn.Sequential(*_a ) A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _a ( self : str ,_a : Optional[int] ): '''simple docstring''' A_ : Tuple = self.pool(self.model(_a ) ) A_ : Any = torch.flatten(_a ,start_dim=2 ) A_ : str = out.transpose(1 ,2 ).contiguous() return out # BxNx2048 class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ): '''simple docstring''' A_ : Dict = [json.loads(_a ) for l in open(_a )] A_ : Optional[int] = os.path.dirname(_a ) A_ : Optional[Any] = tokenizer A_ : Optional[Any] = labels A_ : List[Any] = len(_a ) A_ : str = max_seq_length A_ : str = transforms def __len__( self : str ): '''simple docstring''' return len(self.data ) def __getitem__( self : Tuple ,_a : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) ) A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1] A_ : Optional[int] = sentence[: self.max_seq_length] A_ : Any = torch.zeros(self.n_classes ) A_ : Tuple = 1 A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" ) A_ : Union[str, Any] = self.transforms(_a ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _a ( self : List[Any] ): '''simple docstring''' A_ : str = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( lowerCamelCase : str): A_ : List[Any] = [len(row["""sentence"""]) for row in batch] A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase) A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long) A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long) for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)): A_ : str = input_row["""sentence"""] A_ : Tuple = 1 A_ : int = torch.stack([row["""image"""] for row in batch]) A_ : str = torch.stack([row["""label"""] for row in batch]) A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch]) A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch]) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ): return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ): return transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 
0.1214_5835, 0.1438_0469] , ), ])
27
1
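For readability, a hedged, de-obfuscated sketch of the extended trapezoidal rule that the `code` cell of the row above implements; every identifier below is illustrative, since the sample's names are rewritten by the style transform. Note that the sample's `make_points` generator yields points only while `x < b - h`, which can skip the last interior point; the range-based loop below visits all of them.

def trapezoidal(f, boundary, steps):
    # int(f) ~= h/2 * (f(x0) + 2*f(x1) + ... + 2*f(x_{n-1}) + f(xn))
    a, b = boundary
    h = (b - a) / steps
    total = 0.5 * (f(a) + f(b))         # endpoints carry weight h/2
    for i in range(1, int(steps)):      # interior points carry weight h
        total += f(a + i * h)
    return total * h

print(trapezoidal(lambda x: x * x, [0.0, 1.0], 10))  # ~0.3350 vs the exact 1/3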
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name __magic_name__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int=8): A_ : Tuple = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A_ : List[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : str ,_a : UNetaDConditionModel ,_a : DDPMScheduler ,_a : VQModel ,): '''simple docstring''' super().__init__() self.register_modules( unet=_a ,scheduler=_a ,movq=_a ,) A_ : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _a ( self : Dict ,_a : Any ,_a : List[Any] ,_a : List[Any] ,_a : Union[str, Any] ,_a : List[str] ,_a : Optional[int] ): '''simple docstring''' if latents is None: A_ : Dict = randn_tensor(_a ,generator=_a ,device=_a ,dtype=_a ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) A_ : Dict = latents.to(_a ) A_ : int = latents * scheduler.init_noise_sigma return latents def _a ( self : str ,_a : Dict=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) A_ : Tuple = torch.device(f'cuda:{gpu_id}' ) A_ : List[Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_a ,_a ) def _a ( self : int ,_a : List[Any]=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) A_ : List[Any] = torch.device(f'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to("""cpu""" ,silence_dtype_warnings=_a ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A_ : Dict = None for cpu_offloaded_model in [self.unet, self.movq]: A_ , A_ : Union[str, Any] = cpu_offload_with_hook(_a ,_a ,prev_module_hook=_a ) # We'll offload the last model manually. 
A_ : List[str] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _a ( self : Tuple ): '''simple docstring''' if not hasattr(self.unet ,"""_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(_a ,"""_hf_hook""" ) and hasattr(module._hf_hook ,"""execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_a ) def __call__( self : str ,_a : Union[torch.FloatTensor, List[torch.FloatTensor]] ,_a : Union[torch.FloatTensor, List[torch.FloatTensor]] ,_a : int = 512 ,_a : int = 512 ,_a : int = 100 ,_a : float = 4.0 ,_a : int = 1 ,_a : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,): '''simple docstring''' A_ : Union[str, Any] = self._execution_device A_ : str = guidance_scale > 1.0 if isinstance(_a ,_a ): A_ : str = torch.cat(_a ,dim=0 ) A_ : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt if isinstance(_a ,_a ): A_ : Dict = torch.cat(_a ,dim=0 ) if do_classifier_free_guidance: A_ : str = image_embeds.repeat_interleave(_a ,dim=0 ) A_ : Union[str, Any] = negative_image_embeds.repeat_interleave(_a ,dim=0 ) A_ : Tuple = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=_a ) self.scheduler.set_timesteps(_a ,device=_a ) A_ : str = self.scheduler.timesteps A_ : Optional[Any] = self.unet.config.in_channels A_ , A_ : Any = downscale_height_and_width(_a ,_a ,self.movq_scale_factor ) # create initial latent A_ : int = self.prepare_latents( (batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,_a ,_a ,_a ,self.scheduler ,) for i, t in enumerate(self.progress_bar(_a ) ): # expand the latents if we are doing classifier free guidance A_ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A_ : List[Any] = {"""image_embeds""": image_embeds} A_ : int = self.unet( sample=_a ,timestep=_a ,encoder_hidden_states=_a ,added_cond_kwargs=_a ,return_dict=_a ,)[0] if do_classifier_free_guidance: A_ , A_ : Any = noise_pred.split(latents.shape[1] ,dim=1 ) A_ , A_ : List[Any] = noise_pred.chunk(2 ) A_ , A_ : int = variance_pred.chunk(2 ) A_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A_ : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( hasattr(self.scheduler.config ,"""variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A_ , A_ : Tuple = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A_ : Any = self.scheduler.step( _a ,_a ,_a ,generator=_a ,)[0] # post-processing A_ : Tuple = self.movq.decode(_a ,force_not_quantize=_a )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: A_ : int = image * 0.5 + 0.5 A_ : int = image.clamp(0 ,1 ) A_ : Any = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": A_ : List[str] = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
27
'''simple docstring''' from __future__ import annotations import math def lowerCamelCase ( lowerCamelCase : int): if num <= 0: A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.' raise ValueError(lowerCamelCase) A_ : str = [True] * (num + 1) A_ : Tuple = [] A_ : str = 2 A_ : Any = int(math.sqrt(lowerCamelCase)) while start <= end: # If start is a prime if sieve[start] is True: prime.append(lowerCamelCase) # Set multiples of start be False for i in range(start * start , num + 1 , lowerCamelCase): if sieve[i] is True: A_ : Union[str, Any] = False start += 1 for j in range(end + 1 , num + 1): if sieve[j] is True: prime.append(lowerCamelCase) return prime if __name__ == "__main__": print(prime_sieve(int(input('Enter a positive integer: ').strip())))
27
1
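A small, hedged check of the `downscale_height_and_width` helper defined at the top of the Kandinsky cell in the row above: it divides each pixel dimension by `scale_factor**2`, rounds any partial block up, and multiplies back by `scale_factor`, yielding the latent size passed to `prepare_latents`. The condensed arithmetic below is behaviorally equivalent to the sample's if-based version.

def downscale_height_and_width(height, width, scale_factor=8):
    new_h = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_w = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_h * scale_factor, new_w * scale_factor

assert downscale_height_and_width(768, 768) == (96, 96)   # the docstring example's size
assert downscale_height_and_width(512, 512) == (64, 64)   # the pipeline's defaults
assert downscale_height_and_width(513, 512) == (72, 64)   # a partial block rounds up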
'''simple docstring''' from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean __magic_name__ = 0 __magic_name__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __magic_name__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right __magic_name__ = tuple[int, int] class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : int ,_a : int ,_a : int ,_a : int ,_a : int ,_a : Node | None ,): '''simple docstring''' A_ : Union[str, Any] = pos_x A_ : Union[str, Any] = pos_y A_ : Optional[int] = (pos_y, pos_x) A_ : Optional[Any] = goal_x A_ : Union[str, Any] = goal_y A_ : Any = g_cost A_ : Optional[Any] = parent A_ : Tuple = self.calculate_heuristic() A_ : Union[str, Any] = self.g_cost + self.h_cost def _a ( self : Any ): '''simple docstring''' A_ : Dict = self.pos_x - self.goal_x A_ : Optional[Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(_a ) + abs(_a ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : List[Any] ,_a : Node ): '''simple docstring''' return self.f_cost < other.f_cost class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] ,_a : TPosition ,_a : TPosition ): '''simple docstring''' A_ : str = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,_a ) A_ : List[str] = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,_a ) A_ : List[str] = [self.start] A_ : list[Node] = [] A_ : int = False def _a ( self : Dict ): '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A_ : Dict = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(_a ) self.closed_nodes.append(_a ) A_ : Union[str, Any] = self.get_successors(_a ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_a ) else: # retrieve the best current path A_ : List[Any] = self.open_nodes.pop(self.open_nodes.index(_a ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_a ) else: self.open_nodes.append(_a ) return [self.start.pos] def _a ( self : List[str] ,_a : Node ): '''simple docstring''' A_ : Optional[Any] = [] for action in delta: A_ : int = parent.pos_x + action[1] A_ : List[Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_a ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _a ,_a ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,_a ,) ) return successors def _a ( self : List[Any] ,_a : Node | None ): '''simple docstring''' A_ : Dict = node A_ : Any = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A_ : Any = current_node.parent path.reverse() return path class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[Any] ,_a : TPosition ,_a : TPosition ): '''simple docstring''' A_ : Optional[Any] = AStar(_a ,_a ) A_ : Dict = AStar(_a ,_a ) A_ : Dict = False def _a ( self : List[Any] ): '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() A_ : List[Any] = self.fwd_astar.open_nodes.pop(0 ) A_ : Union[str, Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( _a 
,_a ) self.fwd_astar.closed_nodes.append(_a ) self.bwd_astar.closed_nodes.append(_a ) A_ : Any = current_bwd_node A_ : List[Any] = current_fwd_node A_ : int = { self.fwd_astar: self.fwd_astar.get_successors(_a ), self.bwd_astar: self.bwd_astar.get_successors(_a ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(_a ) else: # retrieve the best current path A_ : str = astar.open_nodes.pop( astar.open_nodes.index(_a ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(_a ) else: astar.open_nodes.append(_a ) return [self.fwd_astar.start.pos] def _a ( self : str ,_a : Node ,_a : Node ): '''simple docstring''' A_ : List[Any] = self.fwd_astar.retrace_path(_a ) A_ : Dict = self.bwd_astar.retrace_path(_a ) bwd_path.pop() bwd_path.reverse() A_ : Optional[Any] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] __magic_name__ = (0, 0) __magic_name__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __magic_name__ = time.time() __magic_name__ = AStar(init, goal) __magic_name__ = a_star.search() __magic_name__ = time.time() - start_time print(f"""AStar execution time = {end_time:f} seconds""") __magic_name__ = time.time() __magic_name__ = BidirectionalAStar(init, goal) __magic_name__ = bd_a_star.search() __magic_name__ = time.time() - bd_start_time print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
27
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __magic_name__ = trt.Logger(trt.Logger.WARNING) __magic_name__ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __magic_name__ = logging.getLogger(__name__) __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __magic_name__ = parser.parse_args() if args.tokenizer_name: __magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __magic_name__ = args.per_device_eval_batch_size __magic_name__ = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __magic_name__ = True __magic_name__ = 'temp_engine/bert-fp32.engine' if args.fpaa: __magic_name__ = 'temp_engine/bert-fp16.engine' if args.inta: __magic_name__ = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __magic_name__ = [network.get_input(i) for i in range(network.num_inputs)] __magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __magic_name__ = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __magic_name__ = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __magic_name__ = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]): A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa) A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa) A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase) # start time A_ : List[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Synchronize the stream and take time stream.synchronize() # end time A_ : str = time.time() A_ : 
Tuple = end_time - start_time A_ : Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __magic_name__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slightly different for training and evaluation. __magic_name__ = raw_datasets['validation'].column_names __magic_name__ = 'question' if 'question' in column_names else column_names[0] __magic_name__ = 'context' if 'context' in column_names else column_names[1] __magic_name__ = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __magic_name__ = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the """ f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __magic_name__ = min(args.max_seq_length, tokenizer.model_max_length) def lowerCamelCase ( lowerCamelCase : Dict): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take up a lot of space). So we remove that # left whitespace A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possibly giving several features when a context is long, each of those features having a # context that overlaps a bit with the context of the previous feature. 
A_ : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A_ : Union[str, Any] = [] for i in range(len(tokenized_examples["""input_ids"""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase) A_ : Tuple = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A_ : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. A_ : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i]) ] return tokenized_examples __magic_name__ = raw_datasets['validation'] # Validation Feature Creation __magic_name__ = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __magic_name__ = default_data_collator __magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __magic_name__ = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. A_ : Tuple = postprocess_qa_predictions( examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: A_ : Dict = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase) __magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! 
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inference for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize # Allocate device memory for inputs and outputs. __magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __magic_name__ = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f""" Num examples = {len(eval_dataset)}""") logger.info(f""" Batch size = {args.per_device_eval_batch_size}""") __magic_name__ = 0.0 __magic_name__ = 0 __magic_name__ = timeit.default_timer() __magic_name__ = None for step, batch in enumerate(eval_dataloader): __magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __magic_name__ , __magic_name__ = outputs __magic_name__ = torch.tensor(start_logits) __magic_name__ = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __magic_name__ = nested_truncate(all_preds, len(eval_dataset)) __magic_name__ = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000)) logger.info('Total Number of Inferences = %d', niter) __magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds) __magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"""Evaluation metrics: {eval_metric}""")
27
1
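The A* cell in the row above switches its heuristic on the HEURISTIC flag (1 for Manhattan, 0 for Euclidean, with 0 as the module's default). A minimal sketch of the two distance functions, with illustrative names (the sample derives dx and dy from the node and goal coordinates):

from math import sqrt

def manhattan(dx, dy):
    return abs(dx) + abs(dy)      # HEURISTIC == 1 branch

def euclidean(dx, dy):
    return sqrt(dx**2 + dy**2)    # HEURISTIC == 0 branch, the sample's default

# Both are admissible on the sample's 4-connected grid (unit-cost moves,
# no diagonals), and manhattan(dx, dy) >= euclidean(dx, dy) everywhere,
# so the Manhattan heuristic expands no more nodes than the Euclidean one.
assert manhattan(3, 4) == 7
assert euclidean(3, 4) == 5.0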
'''simple docstring''' from __future__ import annotations from dataclasses import dataclass @dataclass class __lowerCAmelCase : '''simple docstring''' a_ = 42 a_ = None a_ = None def lowerCamelCase ( lowerCamelCase : TreeNode | None): # Validation def is_valid_tree(lowerCamelCase : TreeNode | None) -> bool: if node is None: return True if not isinstance(lowerCamelCase , lowerCamelCase): return False try: float(node.data) except (TypeError, ValueError): return False return is_valid_tree(node.left) and is_valid_tree(node.right) if not is_valid_tree(lowerCamelCase): raise ValueError( """Each node should be type of TreeNode and data should be float.""") def is_binary_search_tree_recursive_check( lowerCamelCase : TreeNode | None , lowerCamelCase : float , lowerCamelCase : float) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , lowerCamelCase , node.data) and is_binary_search_tree_recursive_check( node.right , node.data , lowerCamelCase) ) return is_binary_search_tree_recursive_check(lowerCamelCase , -float("""inf""") , float("""inf""")) if __name__ == "__main__": import doctest doctest.testmod()
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __magic_name__ = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['ConvNextFeatureExtractor'] __magic_name__ = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
27
1
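A compact restatement of the bound-propagation check that `is_binary_search_tree_recursive_check` performs in the row above: each node must lie strictly between the bounds inherited from its ancestors, starting from (-inf, inf). The class below is a simplified stand-in for the sample's dataclass.

class TreeNode:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

def is_bst(node, lo=float("-inf"), hi=float("inf")):
    if node is None:
        return True
    return (
        lo < node.data < hi                    # node respects inherited bounds
        and is_bst(node.left, lo, node.data)   # left subtree tightens the upper bound
        and is_bst(node.right, node.data, hi)  # right subtree tightens the lower bound
    )

assert is_bst(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
assert not is_bst(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))  # children swapped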
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) A_ : int = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) sd_pipe.set_scheduler("""sample_euler""" ) A_ : str = """A painting of a squirrel eating a burger""" A_ : Any = torch.manual_seed(0 ) A_ : Dict = sd_pipe([prompt] ,generator=_a ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type="""np""" ) A_ : str = output.images A_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : int = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Any ): '''simple docstring''' A_ : str = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) A_ : Optional[int] = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) sd_pipe.set_scheduler("""sample_euler""" ) A_ : List[Any] = """A painting of a squirrel eating a burger""" A_ : Optional[Any] = torch.manual_seed(0 ) A_ : str = sd_pipe([prompt] ,generator=_a ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type="""np""" ) A_ : int = output.images A_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : Optional[Any] = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def _a ( self : Tuple ): '''simple docstring''' A_ : Any = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) A_ : Any = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) sd_pipe.set_scheduler("""sample_dpmpp_2m""" ) A_ : Tuple = """A painting of a squirrel eating a burger""" A_ : Union[str, Any] = torch.manual_seed(0 ) A_ : Optional[int] = sd_pipe( [prompt] ,generator=_a ,guidance_scale=7.5 ,num_inference_steps=15 ,output_type="""np""" ,use_karras_sigmas=_a ,) A_ : Tuple = output.images A_ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : Tuple = np.array( [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
27
'''simple docstring''' import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_text_model""" def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,): '''simple docstring''' super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ) A_ : Tuple = vocab_size A_ : int = hidden_size A_ : Optional[int] = intermediate_size A_ : Optional[int] = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : int = max_position_embeddings A_ : str = hidden_act A_ : Union[str, Any] = layer_norm_eps A_ : Tuple = attention_dropout A_ : Union[str, Any] = initializer_range A_ : List[Any] = initializer_factor @classmethod def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : int = cls.get_config_dict(_a ,**_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_vision_model""" def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,): '''simple docstring''' super().__init__(**_a ) A_ : List[str] = hidden_size A_ : Union[str, Any] = intermediate_size A_ : Union[str, Any] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : int = num_channels A_ : str = image_size A_ : List[Any] = patch_size A_ : int = hidden_act A_ : List[Any] = layer_norm_eps A_ : List[str] = attention_dropout A_ : str = initializer_range A_ : str = initializer_factor @classmethod def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit""" a_ = True def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,): '''simple docstring''' super().__init__(**_a ) if text_config is None: A_ : List[Any] = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) A_ : Dict = OwlViTTextConfig(**_a ) A_ : Dict = OwlViTVisionConfig(**_a ) A_ : Any = projection_dim A_ : Optional[int] = logit_scale_init_value A_ : Optional[int] = return_dict A_ : Dict = 1.0 @classmethod def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a ) if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) @classmethod def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ): '''simple docstring''' A_ : str = {} A_ : int = text_config A_ : Union[str, Any] = vision_config return cls.from_dict(_a ,**_a ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : Optional[int] = self.vision_config.to_dict() A_ : List[Any] = self.__class__.model_type return output class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : int ): '''simple docstring''' return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _a ( self : str ): '''simple docstring''' return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _a ( self : Optional[Any] ): '''simple docstring''' return 1e-4 def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,): '''simple docstring''' A_ : Any = super().generate_dummy_inputs( processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a ) A_ : Any = super().generate_dummy_inputs( processor.image_processor ,batch_size=_a ,framework=_a ) return {**text_input_dict, **image_input_dict} @property def _a ( self : Optional[Any] ): '''simple docstring''' return 14
27
1
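Every test in the k-diffusion cell of the row above asserts on the same fingerprint: the bottom-right 3x3 corner of the last channel of the first generated image, compared elementwise against a stored reference slice. A hedged sketch of that pattern as a standalone helper (the helper name and the zero-image input are illustrative):

import numpy as np

def assert_slice_close(image, expected_slice, tol=1e-2):
    # image is (batch, height, width, channels), as produced with output_type="np"
    assert image.shape == (1, 512, 512, 3)
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < tol

assert_slice_close(np.zeros((1, 512, 512, 3)), np.zeros(9))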
'''simple docstring''' import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __magic_name__ = '.' if __name__ == "__main__": __magic_name__ = os.path.join(REPO_PATH, 'utils/documentation_tests.txt') __magic_name__ = [] __magic_name__ = [] with open(doctest_file_path) as fp: for line in fp: __magic_name__ = line.strip() __magic_name__ = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __magic_name__ = '\n'.join(non_existent_paths) raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
27
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __magic_name__ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""input_features""", """is_longer"""] def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,): '''simple docstring''' super().__init__( feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,) A_ : Tuple = top_db A_ : Tuple = truncation A_ : Optional[Any] = padding A_ : Optional[int] = fft_window_size A_ : Dict = (fft_window_size >> 1) + 1 A_ : Any = hop_length A_ : List[Any] = max_length_s A_ : Tuple = max_length_s * sampling_rate A_ : Tuple = sampling_rate A_ : Optional[int] = frequency_min A_ : Tuple = frequency_max A_ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,) A_ : Dict = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,) def _a ( self : int ): '''simple docstring''' A_ : int = copy.deepcopy(self.__dict__ ) A_ : Tuple = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ): '''simple docstring''' A_ : List[str] = spectrogram( _a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,) return log_mel_spectrogram.T def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A_ : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A_ : int = [0] # randomly choose index for each part A_ : List[str] = np.random.choice(ranges[0] ) A_ : int = np.random.choice(ranges[1] ) A_ : Optional[int] = np.random.choice(ranges[2] ) A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :] A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :] A_ : Dict = mel[idx_back : idx_back + chunk_frames, :] A_ : Optional[int] = torch.tensor(mel[None, None, :] ) A_ : Dict = torch.nn.functional.interpolate( _a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a ) A_ : str = mel_shrink[0][0].numpy() A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": A_ : Dict = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A_ : Tuple = 
len(_a ) - max_length A_ : Optional[int] = np.random.randint(0 ,overflow + 1 ) A_ : List[Any] = waveform[idx : idx + max_length] A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A_ : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 ) A_ : str = False else: A_ : str = self._random_mel_fusion(_a ,_a ,_a ) A_ : Optional[Any] = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented' ) else: A_ : Optional[int] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A_ : int = int(max_length / len(_a ) ) A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A_ : List[str] = int(max_length / len(_a ) ) A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) ) A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,): '''simple docstring''' A_ : List[str] = truncation if truncation is not None else self.truncation A_ : List[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) A_ : int = is_batched_numpy or ( isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_a ,np.ndarray ): A_ : str = np.asarray(_a ,dtype=np.floataa ) elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Any = [np.asarray(_a )] # convert to mel spectrogram, truncate and pad if needed. 
A_ : str = [ self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a ) for waveform in raw_speech ] A_ : int = [] A_ : Any = [] for mel, longer in padded_inputs: input_mel.append(_a ) is_longer.append(_a ) if truncation == "fusion" and sum(_a ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A_ : List[Any] = np.random.randint(0 ,len(_a ) ) A_ : List[str] = True if isinstance(input_mel[0] ,_a ): A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A_ : List[str] = [[longer] for longer in is_longer] A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} A_ : int = BatchFeature(_a ) if return_tensors is not None: A_ : int = input_features.convert_to_tensors(_a ) return input_features
27
1
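A hedged sketch of the "repeatpad" branch from the CLAP feature extractor in the row above: a waveform shorter than max_length is tiled as many whole times as fits, then zero-padded to the target length. The standalone function below mirrors that branch; the name and the toy input are illustrative.

import numpy as np

def repeatpad(waveform, max_length):
    n_repeat = int(max_length / len(waveform))   # whole repetitions that fit
    tiled = np.tile(waveform, n_repeat)
    pad = max_length - tiled.shape[0]            # zero-fill the remainder
    return np.pad(tiled, (0, pad), mode="constant", constant_values=0)

out = repeatpad(np.ones(3, dtype=np.float32), 10)
assert out.shape == (10,) and out[9] == 0.0      # three tiles of ones plus one zero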
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL __magic_name__ = logging.get_logger(__name__) def lowerCamelCase ( lowerCamelCase : str): if isinstance(lowerCamelCase , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]): return videos elif isinstance(lowerCamelCase , (list, tuple)) and is_valid_image(videos[0]): return [videos] elif is_valid_image(lowerCamelCase): return [[videos]] raise ValueError(F'Could not make batched video from {videos}') class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""pixel_values"""] def __init__( self : List[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,**_a : List[Any] ,): '''simple docstring''' super().__init__(**_a ) A_ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 256} A_ : Any = get_size_dict(_a ,default_to_square=_a ) A_ : List[str] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} A_ : str = get_size_dict(_a ,param_name="""crop_size""" ) A_ : Any = do_resize A_ : List[str] = size A_ : Optional[int] = do_center_crop A_ : Dict = crop_size A_ : str = resample A_ : Tuple = do_rescale A_ : int = rescale_factor A_ : Dict = offset A_ : Optional[Any] = do_normalize A_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN A_ : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Dict ,): '''simple docstring''' A_ : int = get_size_dict(_a ,default_to_square=_a ) if "shortest_edge" in size: A_ : Dict = get_resize_output_image_size(_a ,size["""shortest_edge"""] ,default_to_square=_a ) elif "height" in size and "width" in size: A_ : List[Any] = (size["""height"""], size["""width"""]) else: raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' ) return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a ) def _a ( self : Any ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[Any] ,): '''simple docstring''' A_ : Tuple = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f'Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}' ) return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a ) def _a ( self : int ,_a : np.ndarray ,_a : Union[int, float] ,_a : bool = True ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,): '''simple docstring''' A_ : Optional[int] = image.astype(np.floataa ) if offset: A_ : Optional[int] = image - (scale / 2) return rescale(_a ,scale=_a ,data_format=_a ,**_a ) def _a ( self : Optional[Any] ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[Any] ,): '''simple docstring''' return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a ) def _a ( self : Tuple ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : Dict[str, int] = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,): '''simple docstring''' if do_resize and (size is None or resample is None): raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
A_ : Optional[int] = to_numpy_array(_a ) if do_resize: A_ : Tuple = self.resize(image=_a ,size=_a ,resample=_a ) if do_center_crop: A_ : Optional[int] = self.center_crop(_a ,size=_a ) if do_rescale: A_ : Any = self.rescale(image=_a ,scale=_a ,offset=_a ) if do_normalize: A_ : Tuple = self.normalize(image=_a ,mean=_a ,std=_a ) A_ : int = to_channel_dimension_format(_a ,_a ) return image def _a ( self : int ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : Dict[str, int] = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : ChannelDimension = ChannelDimension.FIRST ,**_a : List[Any] ,): '''simple docstring''' A_ : str = do_resize if do_resize is not None else self.do_resize A_ : Tuple = resample if resample is not None else self.resample A_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop A_ : Dict = do_rescale if do_rescale is not None else self.do_rescale A_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor A_ : int = offset if offset is not None else self.offset A_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize A_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean A_ : int = image_std if image_std is not None else self.image_std A_ : str = size if size is not None else self.size A_ : List[Any] = get_size_dict(_a ,default_to_square=_a ) A_ : Tuple = crop_size if crop_size is not None else self.crop_size A_ : List[str] = get_size_dict(_a ,param_name="""crop_size""" ) if not valid_images(_a ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) A_ : List[Any] = make_batched(_a ) A_ : List[Any] = [ [ self._preprocess_image( image=_a ,do_resize=_a ,size=_a ,resample=_a ,do_center_crop=_a ,crop_size=_a ,do_rescale=_a ,rescale_factor=_a ,offset=_a ,do_normalize=_a ,image_mean=_a ,image_std=_a ,data_format=_a ,) for img in video ] for video in videos ] A_ : List[Any] = {"""pixel_values""": videos} return BatchFeature(data=_a ,tensor_type=_a )
27
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : str = batch_size A_ : int = seq_length A_ : Union[str, Any] = is_training A_ : Optional[Any] = use_token_type_ids A_ : int = use_labels A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : int = intermediate_size A_ : Tuple = hidden_act A_ : int = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : int = initializer_range A_ : Optional[Any] = num_labels A_ : str = num_choices A_ : Optional[Any] = scope A_ : List[Any] = self.vocab_size - 1 def _a ( self : Any ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : List[Any] = None if self.use_token_type_ids: A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : int = None A_ : str = None A_ : Union[str, Any] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Any = ids_tensor([self.batch_size] ,self.num_choices ) A_ : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = OpenAIGPTModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a ) A_ : str = model(_a ,token_type_ids=_a ) A_ : Dict = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ): '''simple docstring''' A_ : str = OpenAIGPTLMHeadModel(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ): '''simple docstring''' A_ : Any = OpenAIGPTDoubleHeadsModel(_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ): '''simple docstring''' A_ : List[str] = self.num_labels A_ : int = OpenAIGPTForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = config_and_inputs A_ : int = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) a_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly a_ = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ): '''simple docstring''' A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,) A_ : Any = inputs_dict["""labels"""] A_ : Any = inputs_dict["""labels"""] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,) A_ : int = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=_a ) return inputs_dict def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Tuple = OpenAIGPTModelTester(self ) A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_a ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a ) @slow def _a ( self : List[Any] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : List[str] ): '''simple docstring''' A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_a ) A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is A_ : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : int = model.generate(_a ,do_sample=_a ) self.assertListEqual(output_ids[0].tolist() ,_a )
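Outside the test harness, the same greedy-generation check can be reproduced in a few lines; this sketch assumes the public "openai-gpt" checkpoint is downloadable:

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt").eval()
input_ids = tokenizer("the president is", return_tensors="pt").input_ids
with torch.no_grad():
    output_ids = model.generate(input_ids, do_sample=False)  # greedy, as in the test above
print(tokenizer.decode(output_ids[0]))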
27
1
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def lowerCamelCase ( lowerCamelCase : int = 100_0000 , lowerCamelCase : int = 10): A_ : defaultdict = defaultdict(int) for outer_width in range(3 , (t_limit // 4) + 2): if outer_width * outer_width > t_limit: A_ : Optional[Any] = max( ceil(sqrt(outer_width * outer_width - t_limit)) , 1) else: A_ : Dict = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(lowerCamelCase , outer_width - 1 , 2): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10) if __name__ == "__main__": print(f"""{solution() = }""")
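The counting above rests on one identity: a square lamina with outer side o and a centred square hole of side h (same parity, 0 < h < o) uses o*o - h*h tiles. A quick check that one tile count can arise from several distinct laminae:

def lamina_tiles(outer: int, hole: int) -> int:
    # Tiles in a square lamina: the outer square minus the centred hole.
    assert 0 < hole < outer and (outer - hole) % 2 == 0
    return outer * outer - hole * hole

assert lamina_tiles(3, 1) == 8    # the smallest lamina uses 8 tiles
assert lamina_tiles(6, 2) == 32   # 32 tiles form a 6x6 lamina with a 2x2 hole...
assert lamina_tiles(9, 7) == 32   # ...and also a 9x9 lamina with a 7x7 hole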
27
'''simple docstring''' import baseaa def lowerCamelCase ( lowerCamelCase : str): return baseaa.aaaencode(lowerCamelCase.encode("""utf-8""")) def lowerCamelCase ( lowerCamelCase : bytes): return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""") if __name__ == "__main__": import doctest doctest.testmod()
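These wrappers appear to correspond to the standard library's Ascii85 helpers; a round trip with base64.a85encode/a85decode shows the intended behaviour:

import base64

encoded = base64.a85encode("hello".encode("utf-8"))
assert base64.a85decode(encoded).decode("utf-8") == "hello"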
27
1
'''simple docstring''' import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : int ): '''simple docstring''' super().tearDown() gc.collect() def _a ( self : Tuple ): '''simple docstring''' A_ , A_ : Tuple = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" ,from_pt=_a ,dtype=jnp.bfloataa ) A_ , A_ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,controlnet=_a ,from_pt=_a ,dtype=jnp.bfloataa ) A_ : List[str] = controlnet_params A_ : Any = """bird""" A_ : str = jax.device_count() A_ : Tuple = pipe.prepare_text_inputs([prompts] * num_samples ) A_ : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) A_ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples ) A_ : Optional[Any] = jax.random.PRNGKey(0 ) A_ : Union[str, Any] = jax.random.split(_a ,jax.device_count() ) A_ : int = replicate(_a ) A_ : List[str] = shard(_a ) A_ : str = shard(_a ) A_ : Optional[Any] = pipe( prompt_ids=_a ,image=_a ,params=_a ,prng_seed=_a ,num_inference_steps=50 ,jit=_a ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) A_ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A_ : Union[str, Any] = images[0, 253:256, 253:256, -1] A_ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A_ : Optional[int] = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _a ( self : Optional[int] ): '''simple docstring''' A_ , A_ : Dict = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" ,from_pt=_a ,dtype=jnp.bfloataa ) A_ , A_ : int = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,controlnet=_a ,from_pt=_a ,dtype=jnp.bfloataa ) A_ : Tuple = controlnet_params A_ : int = """Chef in the kitchen""" A_ : Any = jax.device_count() A_ : Any = pipe.prepare_text_inputs([prompts] * num_samples ) A_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) A_ : str = pipe.prepare_image_inputs([pose_image] * num_samples ) A_ : Optional[Any] = jax.random.PRNGKey(0 ) A_ : Dict = jax.random.split(_a ,jax.device_count() ) A_ : Any = replicate(_a ) A_ : Union[str, Any] = shard(_a ) A_ : int = shard(_a ) A_ : str = pipe( prompt_ids=_a ,image=_a ,params=_a ,prng_seed=_a ,num_inference_steps=50 ,jit=_a ,).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) A_ : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) A_ : Union[str, Any] = images[0, 253:256, 253:256, -1] A_ : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A_ : Optional[Any] = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - 
expected_slice ).max() < 1e-2
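The replicate/shard pair used above fans parameters and inputs out across local devices for pmapped inference; a minimal shape check (runs even with a single CPU device):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.local_device_count()
batch = jnp.zeros((n * 2, 3))            # leading dim must be divisible by n
assert shard(batch).shape == (n, 2, 3)   # devices become the new leading axis
params = {"w": jnp.ones((4,))}
assert replicate(params)["w"].shape == (n, 4)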
27
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowerCamelCase ( lowerCamelCase : Optional[Any]): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def lowerCamelCase ( lowerCamelCase : str): # word like '180' or '身高' or '神' for char in word: A_ : Optional[Any] = ord(lowerCamelCase) if not _is_chinese_char(lowerCamelCase): return 0 return 1 def lowerCamelCase ( lowerCamelCase : List[str]): A_ : Any = set() for token in tokens: A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase) if chinese_word: word_set.add(lowerCamelCase) A_ : Any = list(lowerCamelCase) return word_list def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()): if not chinese_word_set: return bert_tokens A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set]) A_ : str = bert_tokens A_ , A_ : Any = 0, len(lowerCamelCase) while start < end: A_ : Tuple = True if is_chinese(bert_word[start]): A_ : List[str] = min(end - start , lowerCamelCase) for i in range(lowerCamelCase , 1 , -1): A_ : Tuple = """""".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1 , start + i): A_ : Dict = """##""" + bert_word[j] A_ : str = start + i A_ : Dict = False break if single_word: start += 1 return bert_word def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer): A_ : Union[str, Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws A_ : int = [get_chinese_word(lowerCamelCase) for r in res] ltp_res.extend(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : List[Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512) bert_res.extend(res["""input_ids"""]) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Union[str, Any] = [] for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase): A_ : List[Any] = [] for id in input_ids: A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase) input_tokens.append(lowerCamelCase) A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase) A_ : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCamelCase): if token[:2] == "##": A_ : Optional[Any] = token[2:] # save chinese tokens' pos if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)): ref_id.append(lowerCamelCase) ref_ids.append(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) return ref_ids def lowerCamelCase ( lowerCamelCase : Tuple): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""") as f: A_ : Optional[int] = f.readlines() A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device A_ : Dict = BertTokenizer.from_pretrained(args.bert) A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase) with open(args.save_path , """w""" , encoding="""utf-8""") as f: A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids] f.writelines(lowerCamelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) __magic_name__ = parser.parse_args() main(args)
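The core trick in add_sub_symbol above is re-tagging the trailing pieces of a segmented Chinese word with "##" so whole-word masking treats the word as one unit. A toy re-implementation of just that idea, not the full LTP pipeline:

def mark_subwords(tokens, words):
    # Re-tag trailing pieces of any token run that joins into a known word.
    out = list(tokens)
    i = 0
    while i < len(out):
        for j in range(len(out), i + 1, -1):
            if "".join(t.lstrip("#") for t in out[i:j]) in words:
                for k in range(i + 1, j):
                    out[k] = "##" + out[k]
                i = j - 1
                break
        i += 1
    return out

assert mark_subwords(["身", "高", "180"], {"身高"}) == ["身", "##高", "180"]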
27
1
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __magic_name__ = logging.getLogger(__name__) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Tuple): return (preds == labels).mean() @dataclass class __lowerCAmelCase : '''simple docstring''' a_ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __lowerCAmelCase : '''simple docstring''' a_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) a_ = field(metadata={"""help""": """Should contain the data files for the task."""} ) a_ = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a_ = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def lowerCamelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) A_ , A_ , A_ : Any = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. 
Use' """ --overwrite_output_dir to overcome.""") # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase) # Set seed set_seed(training_args.seed) try: A_ : List[str] = processors[data_args.task_name]() A_ : Optional[int] = processor.get_labels() A_ : List[str] = len(lowerCamelCase) except KeyError: raise ValueError("""Task not found: %s""" % (data_args.task_name)) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. A_ : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) A_ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) A_ : Tuple = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path) , config=lowerCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets A_ : Any = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) A_ : List[Any] = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=lowerCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(lowerCamelCase : EvalPrediction) -> Dict: A_ : Dict = np.argmax(p.predictions , axis=1) return {"acc": simple_accuracy(lowerCamelCase , p.label_ids)} # Data collator A_ : str = DataCollatorWithPadding(lowerCamelCase , pad_to_multiple_of=8) if training_args.fpaa else None # Initialize our Trainer A_ : Optional[Any] = Trainer( model=lowerCamelCase , args=lowerCamelCase , train_dataset=lowerCamelCase , eval_dataset=lowerCamelCase , compute_metrics=lowerCamelCase , data_collator=lowerCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation A_ : List[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""") A_ : List[str] = trainer.evaluate() A_ : Union[str, Any] = 
os.path.join(training_args.output_dir , """eval_results.txt""") if trainer.is_world_master(): with open(lowerCamelCase , """w""") as writer: logger.info("""***** Eval results *****""") for key, value in result.items(): logger.info(""" %s = %s""" , lowerCamelCase , lowerCamelCase) writer.write("""%s = %s\n""" % (key, value)) results.update(lowerCamelCase) return results def lowerCamelCase ( lowerCamelCase : Any): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
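The simple_accuracy metric wired into compute_metrics above is just a mean over exact matches of the argmaxed predictions; a two-line check:

import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
assert (preds == labels).mean() == 0.75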
27
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """ViltImageProcessor""" a_ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ): '''simple docstring''' A_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a ,_a ) A_ : Optional[Any] = self.image_processor def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,): '''simple docstring''' A_ : int = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) # add pixel_values + pixel_mask A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a ) encoding.update(_a ) return encoding def _a ( self : List[Any] ,*_a : Any ,**_a : Any ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : int ,*_a : int ,**_a : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = self.tokenizer.model_input_names A_ : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a ( self : str ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,) return self.image_processor_class @property def _a ( self : int ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,) return self.image_processor
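Typical use of a processor of this shape; the sketch assumes the public "dandelin/vilt-b32-finetuned-vqa" checkpoint, which pairs a ViltImageProcessor with a BERT tokenizer as above:

import numpy as np
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.fromarray(np.zeros((384, 384, 3), dtype=np.uint8))
encoding = processor(image, "How many cats are there?", return_tensors="pt")
# input_ids/attention_mask come from the tokenizer; pixel_values/pixel_mask
# come from the image processor and are merged in, as implemented above.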
27
1
'''simple docstring''' import os from collections.abc import Iterator def lowerCamelCase ( lowerCamelCase : str = "."): for dir_path, dir_names, filenames in os.walk(lowerCamelCase): A_ : Dict = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(lowerCamelCase)[1] in (".py", ".ipynb"): yield os.path.join(lowerCamelCase , lowerCamelCase).lstrip("""./""") def lowerCamelCase ( lowerCamelCase : Optional[Any]): return F'{i * " "}*' if i else "\n##" def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str): A_ : str = old_path.split(os.sep) for i, new_part in enumerate(new_path.split(os.sep)): if (i + 1 > len(lowerCamelCase) or old_parts[i] != new_part) and new_part: print(F'{md_prefix(lowerCamelCase)} {new_part.replace("_" , " ").title()}') return new_path def lowerCamelCase ( lowerCamelCase : str = "."): A_ : Dict = """""" for filepath in sorted(good_file_paths(lowerCamelCase)): A_ , A_ : int = os.path.split(lowerCamelCase) if filepath != old_path: A_ : Optional[int] = print_path(lowerCamelCase , lowerCamelCase) A_ : Optional[int] = (filepath.count(os.sep) + 1) if filepath else 0 A_ : str = F'{filepath}/{filename}'.replace(""" """ , """%20""") A_ : Optional[Any] = os.path.splitext(filename.replace("""_""" , """ """).title())[0] print(F'{md_prefix(lowerCamelCase)} [{filename}]({url})') if __name__ == "__main__": print_directory_md('.')
27
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""torch""", """torchsde"""] def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ): '''simple docstring''' requires_backends(self ,["""torch""", """torchsde"""] ) @classmethod def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ): '''simple docstring''' requires_backends(cls ,["""torch""", """torchsde"""] ) @classmethod def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ): '''simple docstring''' requires_backends(cls ,["""torch""", """torchsde"""] )
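The metaclass trick above makes imports always succeed while instantiation fails when a backend is missing. A standalone, simplified sketch of the pattern (the class name here is hypothetical):

class DummyObject(type):
    # Raise only at construction time, so `import` itself never fails.
    def __call__(cls, *args, **kwargs):
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")

class RequiresTorchSde(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

try:
    RequiresTorchSde()
except ImportError as err:
    print(err)  # RequiresTorchSde requires the backends ['torch', 'torchsde']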
27
1
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : Dict ,_a : Any=2 ,_a : Dict=8 ,_a : str=True ,_a : List[Any]=True ,_a : int=True ,_a : Union[str, Any]=True ,_a : List[str]=99 ,_a : Any=16 ,_a : Optional[Any]=5 ,_a : Any=2 ,_a : List[Any]=36 ,_a : Any="gelu" ,_a : str=0.0 ,_a : List[str]=0.0 ,_a : Union[str, Any]=512 ,_a : Dict=16 ,_a : str=2 ,_a : Any=0.02 ,_a : Union[str, Any]=3 ,_a : List[str]=4 ,_a : Optional[int]=None ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : Optional[Any] = batch_size A_ : Optional[Any] = seq_length A_ : List[str] = is_training A_ : Optional[int] = use_input_mask A_ : List[str] = use_token_type_ids A_ : Optional[Any] = use_labels A_ : int = vocab_size A_ : List[str] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : Dict = num_attention_heads A_ : Optional[int] = intermediate_size A_ : int = hidden_act A_ : str = hidden_dropout_prob A_ : Tuple = attention_probs_dropout_prob A_ : Optional[int] = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Union[str, Any] = type_sequence_label_size A_ : Optional[Any] = initializer_range A_ : Optional[int] = num_labels A_ : Dict = num_choices A_ : str = scope def _a ( self : Tuple ): '''simple docstring''' A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : int = None if self.use_input_mask: A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Optional[Any] = None if self.use_token_type_ids: A_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : Dict = None A_ : List[str] = None A_ : Dict = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices ) A_ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Optional[Any] ): '''simple docstring''' return MraConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = self.get_config() A_ : Dict = 300 return config def _a ( self : Union[str, Any] ): '''simple docstring''' ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Union[str, Any] = self.prepare_config_and_inputs() A_ : Optional[int] = True A_ : Tuple = floats_tensor([self.batch_size, 
self.seq_length, self.hidden_size] ) A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _a ( self : Tuple ,_a : str ,_a : Optional[int] ,_a : List[Any] ,_a : Optional[int] ,_a : str ,_a : str ,_a : List[Any] ): '''simple docstring''' A_ : List[Any] = MraModel(config=_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,attention_mask=_a ,token_type_ids=_a ) A_ : List[str] = model(_a ,token_type_ids=_a ) A_ : str = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : int ,_a : List[Any] ,_a : Optional[Any] ,_a : List[str] ,_a : Optional[Any] ,_a : List[str] ,_a : int ,_a : Union[str, Any] ,_a : str ,_a : List[Any] ,): '''simple docstring''' A_ : Optional[int] = True A_ : Any = MraModel(_a ) model.to(_a ) model.eval() A_ : List[Any] = model( _a ,attention_mask=_a ,token_type_ids=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,) A_ : str = model( _a ,attention_mask=_a ,token_type_ids=_a ,encoder_hidden_states=_a ,) A_ : Union[str, Any] = model(_a ,attention_mask=_a ,token_type_ids=_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Optional[Any] ,_a : int ,_a : Optional[Any] ,_a : int ,_a : Tuple ,_a : Tuple ,_a : Dict ,_a : str ): '''simple docstring''' A_ : Optional[Any] = MraForMaskedLM(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Optional[Any] ,_a : str ,_a : Optional[Any] ,_a : Tuple ,_a : Union[str, Any] ,_a : int ,_a : List[Any] ,_a : str ): '''simple docstring''' A_ : str = MraForQuestionAnswering(config=_a ) model.to(_a ) model.eval() A_ : Any = model( _a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _a ( self : str ,_a : int ,_a : str ,_a : Optional[int] ,_a : Any ,_a : Optional[Any] ,_a : str ,_a : int ): '''simple docstring''' A_ : Optional[Any] = self.num_labels A_ : Dict = MraForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : List[str] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : Any ,_a : Optional[Any] ,_a : Optional[int] ,_a : str ,_a : Optional[int] ,_a : Optional[Any] ,_a : Tuple ,_a : Optional[Any] ): '''simple docstring''' A_ : Any = self.num_labels A_ : Union[str, Any] = MraForTokenClassification(config=_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _a ( self : Union[str, Any] ,_a : str ,_a : List[str] ,_a : Tuple ,_a : Optional[Any] ,_a : str ,_a : Tuple ,_a : Tuple ): '''simple docstring''' A_ : Any = self.num_choices A_ : int = MraForMultipleChoice(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A_ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 
,self.num_choices ,-1 ).contiguous() A_ : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() A_ : Optional[int] = model( _a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _a ( self : str ): '''simple docstring''' A_ : Any = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Optional[Any] = config_and_inputs A_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) a_ = False a_ = False a_ = False a_ = False a_ = () def _a ( self : Optional[Any] ): '''simple docstring''' A_ : List[str] = MraModelTester(self ) A_ : Optional[int] = ConfigTester(self ,config_class=_a ,hidden_size=37 ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : List[Any] ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : List[Any] = type self.model_tester.create_and_check_model(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_a ) def _a ( self : List[str] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_a ) def _a ( self : int ): '''simple docstring''' A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_a ) @slow def _a ( self : Tuple ): '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = MraModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @unittest.skip(reason="""MRA does not output attentions""" ) def _a ( self : Tuple ): '''simple docstring''' return @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : str ): '''simple docstring''' A_ : Any = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) A_ : Dict = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): A_ : Any = model(_a )[0] A_ : str = torch.Size((1, 256, 768) ) self.assertEqual(output.shape ,_a ) A_ : List[Any] = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Optional[int] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) A_ : Union[str, Any] = torch.arange(256 ).unsqueeze(0 ) with 
torch.no_grad(): A_ : Tuple = model(_a )[0] A_ : str = 50265 A_ : Dict = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape ,_a ) A_ : Tuple = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) ) @slow def _a ( self : Optional[int] ): '''simple docstring''' A_ : str = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) A_ : Optional[int] = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): A_ : List[Any] = model(_a )[0] A_ : List[str] = 50265 A_ : Union[str, Any] = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape ,_a ) A_ : Optional[int] = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
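The integration checks above can be reproduced directly; this sketch assumes the "uw-madison/mra-base-512-4" checkpoint is reachable:

import torch
from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4").eval()
input_ids = torch.arange(256).unsqueeze(0)
with torch.no_grad():
    hidden = model(input_ids)[0]
assert hidden.shape == (1, 256, 768)  # matches the expected shape in the test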
27
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"): A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {} A_ : Optional[int] = padding_side return tokenizer( [line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , ) def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ): A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,): '''simple docstring''' super().__init__() A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" ) A_ : Any = Path(_a ).joinpath(type_path + """.target""" ) A_ : Dict = self.get_char_lens(self.src_file ) A_ : Optional[int] = max_source_length A_ : List[str] = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' A_ : List[Any] = tokenizer A_ : Optional[Any] = prefix if n_obs is not None: A_ : Any = self.src_lens[:n_obs] A_ : Optional[int] = src_lang A_ : Tuple = tgt_lang def __len__( self : Tuple ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : List[str] ,_a : Tuple ): '''simple docstring''' A_ : int = index + 1 # linecache starts at 1 A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" ) A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_a ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right A_ : List[str] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer ) A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" ) A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" ) A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze() A_ : Dict = target_inputs["""input_ids"""].squeeze() A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _a ( _a : int ): '''simple docstring''' return [len(_a ) for x in Path(_a ).open().readlines()] def _a ( self : 
Optional[int] ,_a : Dict ): '''simple docstring''' A_ : str = torch.stack([x["""input_ids"""] for x in batch] ) A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] ) A_ : Union[str, Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : str = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : List[str] = trim_batch(_a ,_a ) A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a ) A_ : List[str] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __magic_name__ = getLogger(__name__) def lowerCamelCase ( lowerCamelCase : List[List]): return list(itertools.chain.from_iterable(lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : str): A_ : Union[str, Any] = get_git_info() save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json""")) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]): with open(lowerCamelCase , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase) def lowerCamelCase ( lowerCamelCase : Any): with open(lowerCamelCase) as f: return json.load(lowerCamelCase) def lowerCamelCase ( ): A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase) A_ : Union[str, Any] = { """repo_id""": str(lowerCamelCase), """repo_sha""": str(repo.head.object.hexsha), """repo_branch""": str(repo.active_branch), """hostname""": str(socket.gethostname()), } return repo_infos def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable): return list(map(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]): with open(lowerCamelCase , """wb""") as f: return pickle.dump(lowerCamelCase , lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str]): def remove_articles(lowerCamelCase : Any): return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase) def white_space_fix(lowerCamelCase : List[Any]): return " ".join(text.split()) def remove_punc(lowerCamelCase : Union[str, Any]): A_ : Optional[int] = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(lowerCamelCase : List[str]): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase)))) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int): A_ : Tuple = normalize_answer(lowerCamelCase).split() A_ : Dict = normalize_answer(lowerCamelCase).split() A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase) A_ : Any = sum(common.values()) if num_same == 0: return 0 A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any): return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]): assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Any = 0 for hypo, pred in zip(lowerCamelCase , lowerCamelCase): em += exact_match_score(lowerCamelCase , lowerCamelCase) if len(lowerCamelCase) > 0: em /= len(lowerCamelCase) return {"em": em} def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return 
model_prefix.startswith("""rag""") def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]): A_ : Optional[Any] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead A_ : Tuple = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase): if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) continue A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p] setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) return hparams, config
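The SQuAD-style metric helpers above lowercase, strip punctuation, and drop articles before comparing strings; a self-contained check of that behaviour:

import re
import string
from collections import Counter

def squad_normalize(s: str) -> str:
    # Same cleanup steps as normalize_answer above.
    s = "".join(ch for ch in s.lower() if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())

assert squad_normalize("The Cat!") == "cat"
pred = squad_normalize("a cat sat").split()
gold = squad_normalize("the cat sat").split()
assert sum((Counter(pred) & Counter(gold)).values()) == 2  # full overlap -> F1 of 1.0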
27
1
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters __magic_name__ = False __magic_name__ = False def lowerCamelCase ( lowerCamelCase : Namespace): return TrainCommand(lowerCamelCase) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @staticmethod def _a ( _a : ArgumentParser ): '''simple docstring''' A_ : Union[str, Any] = parser.add_parser("""train""" ,help="""CLI tool to train a model on a task.""" ) train_parser.add_argument( """--train_data""" ,type=_a ,required=_a ,help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" ,) train_parser.add_argument( """--column_label""" ,type=_a ,default=0 ,help="""Column of the dataset csv file with example labels.""" ) train_parser.add_argument( """--column_text""" ,type=_a ,default=1 ,help="""Column of the dataset csv file with example texts.""" ) train_parser.add_argument( """--column_id""" ,type=_a ,default=2 ,help="""Column of the dataset csv file with example ids.""" ) train_parser.add_argument( """--skip_first_row""" ,action="""store_true""" ,help="""Skip the first row of the csv file (headers).""" ) train_parser.add_argument("""--validation_data""" ,type=_a ,default="""""" ,help="""path to validation dataset.""" ) train_parser.add_argument( """--validation_split""" ,type=_a ,default=0.1 ,help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" ,) train_parser.add_argument("""--output""" ,type=_a ,default="""./""" ,help="""path to saved the trained model.""" ) train_parser.add_argument( """--task""" ,type=_a ,default="""text_classification""" ,help="""Task to train the model on.""" ) train_parser.add_argument( """--model""" ,type=_a ,default="""bert-base-uncased""" ,help="""Model's name or path to stored model.""" ) train_parser.add_argument("""--train_batch_size""" ,type=_a ,default=32 ,help="""Batch size for training.""" ) train_parser.add_argument("""--valid_batch_size""" ,type=_a ,default=64 ,help="""Batch size for validation.""" ) train_parser.add_argument("""--learning_rate""" ,type=_a ,default=3e-5 ,help="""Learning rate.""" ) train_parser.add_argument("""--adam_epsilon""" ,type=_a ,default=1e-08 ,help="""Epsilon for Adam optimizer.""" ) train_parser.set_defaults(func=_a ) def __init__( self : Optional[Any] ,_a : Namespace ): '''simple docstring''' A_ : int = logging.get_logger("""transformers-cli/training""" ) A_ : Optional[Any] = """tf""" if is_tf_available() else """torch""" os.makedirs(args.output ,exist_ok=_a ) A_ : Any = args.output A_ : str = args.column_label A_ : List[Any] = args.column_text A_ : Dict = args.column_id self.logger.info(f'Loading {args.task} pipeline for {args.model}' ) if args.task == "text_classification": A_ : List[Any] = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f'Loading dataset from {args.train_data}' ) A_ : Tuple = Processor.create_from_csv( args.train_data 
,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,) A_ : List[Any] = None if args.validation_data: self.logger.info(f'Loading validation dataset from {args.validation_data}' ) A_ : Tuple = Processor.create_from_csv( args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,) A_ : int = args.validation_split A_ : Optional[Any] = args.train_batch_size A_ : List[Any] = args.valid_batch_size A_ : List[Any] = args.learning_rate A_ : Optional[int] = args.adam_epsilon def _a ( self : Optional[int] ): '''simple docstring''' if self.framework == "tf": return self.run_tf() return self.run_torch() def _a ( self : List[Any] ): '''simple docstring''' raise NotImplementedError def _a ( self : Dict ): '''simple docstring''' self.pipeline.fit( self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,) # Save trained pipeline self.pipeline.save_pretrained(self.output )
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __magic_name__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
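_LazyModule above defers the heavy submodule imports until an attribute is first accessed. A simplified standalone sketch of the idea (not the actual transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first access to one of its names.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")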
27
1
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union

if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
    'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
    'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
    'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = """owlvit_text_model"""

    def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
        '''simple docstring'''
        super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
        A_ : Tuple = vocab_size
        A_ : int = hidden_size
        A_ : Optional[int] = intermediate_size
        A_ : Optional[int] = num_hidden_layers
        A_ : Union[str, Any] = num_attention_heads
        A_ : int = max_position_embeddings
        A_ : str = hidden_act
        A_ : Union[str, Any] = layer_norm_eps
        A_ : Tuple = attention_dropout
        A_ : Union[str, Any] = initializer_range
        A_ : List[Any] = initializer_factor

    @classmethod
    def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
        '''simple docstring'''
        cls._set_token_in_kwargs(_a )
        A_ , A_ : int = cls.get_config_dict(_a ,**_a )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            A_ : Union[str, Any] = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(_a ,**_a )

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = """owlvit_vision_model"""

    def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
        '''simple docstring'''
        super().__init__(**_a )
        A_ : List[str] = hidden_size
        A_ : Union[str, Any] = intermediate_size
        A_ : Union[str, Any] = num_hidden_layers
        A_ : Optional[Any] = num_attention_heads
        A_ : int = num_channels
        A_ : str = image_size
        A_ : List[Any] = patch_size
        A_ : int = hidden_act
        A_ : List[Any] = layer_norm_eps
        A_ : List[str] = attention_dropout
        A_ : str = initializer_range
        A_ : str = initializer_factor

    @classmethod
    def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
        '''simple docstring'''
        cls._set_token_in_kwargs(_a )
        A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            A_ : List[str] = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(_a ,**_a )

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = """owlvit"""
    a_ = True

    def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
        '''simple docstring'''
        super().__init__(**_a )
        if text_config is None:
            A_ : List[Any] = {}
            logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
        if vision_config is None:
            A_ : Tuple = {}
            logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
        A_ : Dict = OwlViTTextConfig(**_a )
        A_ : Dict = OwlViTVisionConfig(**_a )
        A_ : Any = projection_dim
        A_ : Optional[int] = logit_scale_init_value
        A_ : Optional[int] = return_dict
        A_ : Dict = 1.0

    @classmethod
    def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
        '''simple docstring'''
        cls._set_token_in_kwargs(_a )
        A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(_a ,**_a )

    @classmethod
    def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
        '''simple docstring'''
        A_ : str = {}
        A_ : int = text_config
        A_ : Union[str, Any] = vision_config
        return cls.from_dict(_a ,**_a )

    def _a ( self : Optional[int] ):
        '''simple docstring'''
        A_ : Dict = copy.deepcopy(self.__dict__ )
        A_ : str = self.text_config.to_dict()
        A_ : Optional[int] = self.vision_config.to_dict()
        A_ : List[Any] = self.__class__.model_type
        return output

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    @property
    def _a ( self : int ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
            ] )

    @property
    def _a ( self : str ):
        '''simple docstring'''
        return OrderedDict(
            [
                ("""logits_per_image""", {0: """batch"""}),
                ("""logits_per_text""", {0: """batch"""}),
                ("""text_embeds""", {0: """batch"""}),
                ("""image_embeds""", {0: """batch"""}),
            ] )

    @property
    def _a ( self : Optional[Any] ):
        '''simple docstring'''
        return 1e-4

    def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
        '''simple docstring'''
        A_ : Any = super().generate_dummy_inputs(
            processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
        A_ : Any = super().generate_dummy_inputs(
            processor.image_processor ,batch_size=_a ,framework=_a )
        return {**text_input_dict, **image_input_dict}

    @property
    def _a ( self : Optional[Any] ):
        '''simple docstring'''
        return 14
27
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyVaaControlnetPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

enable_full_determinism()

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''
    a_ = KandinskyVaaControlnetPipeline
    a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    a_ = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    a_ = False

    @property
    def _a ( self : Any ):
        '''simple docstring'''
        return 32

    @property
    def _a ( self : Tuple ):
        '''simple docstring'''
        return 32

    @property
    def _a ( self : Tuple ):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def _a ( self : str ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def _a ( self : Optional[Any] ):
        '''simple docstring'''
        return 100

    @property
    def _a ( self : List[Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        A_ : List[Any] = {
            """in_channels""": 8,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """image_hint""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        A_ : Tuple = UNetaDConditionModel(**_a )
        return model

    @property
    def _a ( self : List[str] ):
        '''simple docstring'''
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def _a ( self : Optional[int] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        A_ : int = VQModel(**self.dummy_movq_kwargs )
        return model

    def _a ( self : List[str] ):
        '''simple docstring'''
        A_ : Optional[Any] = self.dummy_unet
        A_ : int = self.dummy_movq
        A_ : Tuple = DDIMScheduler(
            num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
        A_ : int = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components

    def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
        '''simple docstring'''
        A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
        A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
            _a )
        # create hint
        A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
        if str(_a ).startswith("""mps""" ):
            A_ : Optional[Any] = torch.manual_seed(_a )
        else:
            A_ : str = torch.Generator(device=_a ).manual_seed(_a )
        A_ : List[Any] = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs

    def _a ( self : Dict ):
        '''simple docstring'''
        A_ : List[Any] = """cpu"""
        A_ : List[str] = self.get_dummy_components()
        A_ : Tuple = self.pipeline_class(**_a )
        A_ : Dict = pipe.to(_a )
        pipe.set_progress_bar_config(disable=_a )
        A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
        A_ : Tuple = output.images
        A_ : Optional[Any] = pipe(
            **self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
        A_ : Tuple = image[0, -3:, -3:, -1]
        A_ : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        A_ : List[Any] = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    '''simple docstring'''
    def _a ( self : Tuple ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _a ( self : Any ):
        '''simple docstring'''
        A_ : Tuple = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
        A_ : Optional[int] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
        A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
        A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
        pipe_prior.to(_a )
        A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
        A_ : Union[str, Any] = pipeline.to(_a )
        pipeline.set_progress_bar_config(disable=_a )
        A_ : Optional[Any] = """A robot, 4k photo"""
        A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
        A_ , A_ : List[str] = pipe_prior(
            _a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
        A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
        A_ : List[Any] = pipeline(
            image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
        A_ : Dict = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(_a ,_a )
27
1
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
    'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
    'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
    'junnyu/roformer_chinese_char_small': (
        'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
    ),
    'junnyu/roformer_chinese_char_base': (
        'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
    ),
    'junnyu/roformer_small_discriminator': (
        'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
    ),
    'junnyu/roformer_small_generator': (
        'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = """roformer"""

    def __init__( self : List[Any] ,_a : Tuple=50000 ,_a : List[str]=None ,_a : int=768 ,_a : List[str]=12 ,_a : Optional[Any]=12 ,_a : Union[str, Any]=3072 ,_a : Optional[int]="gelu" ,_a : Dict=0.1 ,_a : List[str]=0.1 ,_a : Any=1536 ,_a : Optional[Any]=2 ,_a : List[Any]=0.02 ,_a : Dict=1e-12 ,_a : Union[str, Any]=0 ,_a : List[str]=False ,_a : str=True ,**_a : Optional[Any] ,):
        '''simple docstring'''
        super().__init__(pad_token_id=_a ,**_a )
        A_ : Optional[int] = vocab_size
        A_ : str = hidden_size if embedding_size is None else embedding_size
        A_ : int = hidden_size
        A_ : Any = num_hidden_layers
        A_ : Tuple = num_attention_heads
        A_ : List[str] = hidden_act
        A_ : str = intermediate_size
        A_ : Union[str, Any] = hidden_dropout_prob
        A_ : Any = attention_probs_dropout_prob
        A_ : Union[str, Any] = max_position_embeddings
        A_ : List[str] = type_vocab_size
        A_ : List[str] = initializer_range
        A_ : List[str] = layer_norm_eps
        A_ : Optional[Any] = rotary_value
        A_ : Union[str, Any] = use_cache

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    @property
    def _a ( self : Any ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            A_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            A_ : int = {0: """batch""", 1: """sequence"""}
        A_ : Union[str, Any] = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
27
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = """deberta-v2"""

    def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
        '''simple docstring'''
        super().__init__(**_a )
        A_ : Union[str, Any] = hidden_size
        A_ : Dict = num_hidden_layers
        A_ : Union[str, Any] = num_attention_heads
        A_ : List[Any] = intermediate_size
        A_ : List[Any] = hidden_act
        A_ : Optional[int] = hidden_dropout_prob
        A_ : Dict = attention_probs_dropout_prob
        A_ : int = max_position_embeddings
        A_ : Any = type_vocab_size
        A_ : List[Any] = initializer_range
        A_ : int = relative_attention
        A_ : Tuple = max_relative_positions
        A_ : int = pad_token_id
        A_ : Tuple = position_biased_input
        # Backwards compatibility
        if type(_a ) == str:
            A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        A_ : Any = pos_att_type
        A_ : Optional[int] = vocab_size
        A_ : Tuple = layer_norm_eps
        A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
        A_ : Union[str, Any] = pooler_dropout
        A_ : List[Any] = pooler_hidden_act

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    @property
    def _a ( self : Any ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            A_ : Any = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    @property
    def _a ( self : Optional[int] ):
        '''simple docstring'''
        return 12

    def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
        '''simple docstring'''
        A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
27
1
'''simple docstring'''
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    def __init__( self : List[Any] ,*_a : List[Any] ,_a : Optional[Any]=None ,_a : int=None ,**_a : Tuple ):
        '''simple docstring'''
        super().__init__(*_a ,**_a )
        A_ : Tuple = eval_examples
        A_ : Optional[int] = post_process_function

    def _a ( self : Tuple ,_a : Tuple=None ,_a : Union[str, Any]=None ,_a : List[Any]=None ,_a : str = "eval" ):
        '''simple docstring'''
        A_ : str = self.eval_dataset if eval_dataset is None else eval_dataset
        A_ : Optional[Any] = self.get_eval_dataloader(_a )
        A_ : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        A_ : List[str] = self.compute_metrics
        A_ : Tuple = None
        A_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        A_ : List[str] = time.time()
        try:
            A_ : Dict = eval_loop(
                _a ,description="""Evaluation""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_a ,metric_key_prefix=_a ,)
        finally:
            A_ : Optional[Any] = compute_metrics
        A_ : Tuple = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                _a ,_a ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            A_ : int = self.post_process_function(_a ,_a ,output.predictions )
            A_ : str = self.compute_metrics(_a )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'{metric_key_prefix}_' ):
                    A_ : List[Any] = metrics.pop(_a )
            metrics.update(output.metrics )
        else:
            A_ : List[Any] = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(_a )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        A_ : List[Any] = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,_a )
        return metrics

    def _a ( self : Optional[Any] ,_a : Dict ,_a : List[Any] ,_a : List[Any]=None ,_a : str = "test" ):
        '''simple docstring'''
        A_ : str = self.get_test_dataloader(_a )
        # Temporarily disable metric computation, we will do it in the loop here.
        A_ : Any = self.compute_metrics
        A_ : List[Any] = None
        A_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        A_ : int = time.time()
        try:
            A_ : str = eval_loop(
                _a ,description="""Prediction""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_a ,metric_key_prefix=_a ,)
        finally:
            A_ : Tuple = compute_metrics
        A_ : Optional[Any] = self.args.eval_batch_size * self.args.world_size
        if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
            start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
        output.metrics.update(
            speed_metrics(
                _a ,_a ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        A_ : Any = self.post_process_function(_a ,_a ,output.predictions ,"""predict""" )
        A_ : Union[str, Any] = self.compute_metrics(_a )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'{metric_key_prefix}_' ):
                A_ : str = metrics.pop(_a )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=_a )
27
'''simple docstring'''
import sys
import webbrowser

import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    __magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    __magic_name__ = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    __magic_name__ = BeautifulSoup(res.text, 'html.parser')
    __magic_name__ = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"""https://google.com{link.get('href')}""")
27
1
'''simple docstring'''
import cmath
import math

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float):
    A_ : Optional[Any] = math.radians(lowerCamelCase)
    A_ : Any = math.radians(lowerCamelCase)
    # Convert voltage and current to rectangular form
    A_ : Optional[int] = cmath.rect(lowerCamelCase , lowerCamelCase)
    A_ : Optional[int] = cmath.rect(lowerCamelCase , lowerCamelCase)
    # Calculate apparent power
    return voltage_rect * current_rect

if __name__ == "__main__":
    import doctest

    doctest.testmod()
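    # Worked phasor check (example values chosen for this note, not taken from
    # the sample itself): a 100 V source at +30 degrees times a 5 A current at
    # -30 degrees has magnitude 100 * 5 and angle 30 + (-30) = 0 degrees,
    # i.e. roughly 500 + 0j volt-amperes.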
27
'''simple docstring'''
from ... import PretrainedConfig

__magic_name__ = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    a_ = """nezha"""

    def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
        '''simple docstring'''
        super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
        A_ : Tuple = vocab_size
        A_ : int = hidden_size
        A_ : Any = num_hidden_layers
        A_ : List[Any] = num_attention_heads
        A_ : Tuple = hidden_act
        A_ : List[Any] = intermediate_size
        A_ : List[str] = hidden_dropout_prob
        A_ : Tuple = attention_probs_dropout_prob
        A_ : Dict = max_position_embeddings
        A_ : Optional[Any] = max_relative_position
        A_ : List[Any] = type_vocab_size
        A_ : int = initializer_range
        A_ : Tuple = layer_norm_eps
        A_ : Dict = classifier_dropout
        A_ : int = use_cache
27
1
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging

__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
    't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
    't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
    't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
    't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
    't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = """t5"""
    a_ = ["""past_key_values"""]
    a_ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}

    def __init__( self : List[Any] ,_a : List[str]=32128 ,_a : Any=512 ,_a : Dict=64 ,_a : List[Any]=2048 ,_a : str=6 ,_a : List[Any]=None ,_a : Optional[Any]=8 ,_a : List[str]=32 ,_a : Dict=128 ,_a : Optional[Any]=0.1 ,_a : str=1e-6 ,_a : Optional[Any]=1.0 ,_a : List[Any]="relu" ,_a : Optional[Any]=True ,_a : Optional[int]=True ,_a : Optional[Any]=0 ,_a : Optional[int]=1 ,**_a : List[str] ,):
        '''simple docstring'''
        A_ : Optional[int] = vocab_size
        A_ : Dict = d_model
        A_ : List[Any] = d_kv
        A_ : Tuple = d_ff
        A_ : Any = num_layers
        A_ : Optional[Any] = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        A_ : List[Any] = num_heads
        A_ : Optional[Any] = relative_attention_num_buckets
        A_ : Optional[Any] = relative_attention_max_distance
        A_ : int = dropout_rate
        A_ : List[Any] = layer_norm_epsilon
        A_ : int = initializer_factor
        A_ : Any = feed_forward_proj
        A_ : List[str] = use_cache
        A_ : Optional[Any] = self.feed_forward_proj.split("""-""" )
        A_ : Optional[int] = act_info[-1]
        A_ : Tuple = act_info[0] == """gated"""
        if len(_a ) > 1 and act_info[0] != "gated" or len(_a ) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            A_ : Dict = """gelu_new"""
        super().__init__(
            pad_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,**_a ,)

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    @property
    def _a ( self : Tuple ):
        '''simple docstring'''
        A_ : Union[str, Any] = {
            """input_ids""": {0: """batch""", 1: """encoder_sequence"""},
            """attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
        }
        if self.use_past:
            A_ : Union[str, Any] = """past_encoder_sequence + sequence"""
            A_ : int = {0: """batch"""}
            A_ : int = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        else:
            A_ : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
            A_ : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(_a ,direction="""inputs""" )
        return common_inputs

    @property
    def _a ( self : List[str] ):
        '''simple docstring'''
        return 13
27
'''simple docstring'''
from __future__ import annotations

def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
    A_ , A_ : List[Any] = set(lowerCamelCase), [start]
    while stack:
        A_ : Optional[Any] = stack.pop()
        explored.add(lowerCamelCase)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(lowerCamelCase)
    return explored

__magic_name__ = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
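# Because the example graph above is connected, a traversal from any start
# vertex (not only 'A') visits every vertex, so the returned set is always
# {'A', 'B', 'C', 'D', 'E', 'F', 'G'} regardless of the starting point.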
27
1
'''simple docstring'''
def lowerCamelCase ( ):
    for n in range(1 , 100_0000):
        yield n * (n + 1) // 2

def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
    A_ : List[Any] = 1
    A_ : Optional[int] = 2
    while i * i <= n:
        A_ : List[str] = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count

def lowerCamelCase ( ):
    return next(i for i in triangle_number_generator() if count_divisors(lowerCamelCase) > 500)

if __name__ == "__main__":
    print(solution())
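# Worked check of the divisor-counting loop above (example value chosen for
# this note): 28 = 2**2 * 7 factorises with multiplicities 2 and 1, so the
# loop yields (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).  The
# triangle numbers run 1, 3, 6, 10, 15, 21, 28, ..., making 28 the first
# one with more than five divisors.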
27
'''simple docstring'''
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)

def lowerCamelCase ( lowerCamelCase : Dict):
    A_ : List[str] = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        A_ : Union[str, Any] = [144, 192, 240]
        A_ : int = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        A_ : List[str] = [96, 120, 144]
        A_ : Any = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        A_ : Any = [64, 80, 96]
        A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
        A_ : Any = 0.05
        A_ : List[Any] = 2.0
    if mobilevit_name.startswith("""deeplabv3_"""):
        A_ : int = 512
        A_ : Optional[int] = 16
        A_ : List[Any] = 21
        A_ : List[str] = """pascal-voc-id2label.json"""
    else:
        A_ : str = 1000
        A_ : Any = """imagenet-1k-id2label.json"""
    A_ : Any = """huggingface/label-files"""
    A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
    A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
    A_ : Any = idalabel
    A_ : List[str] = {v: k for k, v in idalabel.items()}
    return config

def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
    for i in range(1 , 6):
        if F'layer_{i}.' in name:
            A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
    if "conv_1." in name:
        A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
    if ".block." in name:
        A_ : Optional[Any] = name.replace(""".block.""" , """.""")
    if "exp_1x1" in name:
        A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
    if "red_1x1" in name:
        A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
    if ".local_rep.conv_3x3." in name:
        A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
    if ".local_rep.conv_1x1." in name:
        A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
    if ".norm." in name:
        A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
    if ".conv." in name:
        A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
    if ".conv_proj." in name:
        A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
    for i in range(0 , 2):
        for j in range(0 , 4):
            if F'.{i}.{j}.' in name:
                A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
    for i in range(2 , 6):
        for j in range(0 , 4):
            if F'.{i}.{j}.' in name:
                A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
                if "expand_1x1" in name:
                    A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
                if "conv_3x3" in name:
                    A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
                if "reduce_1x1" in name:
                    A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
    for i in range(2 , 5):
        if F'.global_rep.{i}.weight' in name:
            A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
        if F'.global_rep.{i}.bias' in name:
            A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
    if ".global_rep." in name:
        A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
    if ".pre_norm_mha.0." in name:
        A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
    if ".pre_norm_mha.1.out_proj." in name:
        A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
    if ".pre_norm_ffn.0." in name:
        A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
    if ".pre_norm_ffn.1." in name:
        A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
    if ".pre_norm_ffn.4." in name:
        A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
    if ".transformer." in name:
        A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
    if ".aspp_layer." in name:
        A_ : int = name.replace(""".aspp_layer.""" , """.""")
    if ".aspp_pool." in name:
        A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
    if "seg_head." in name:
        A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
    if "segmentation_head.classifier.classifier." in name:
        A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
    if "classifier.fc." in name:
        A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
    elif (not base_model) and ("segmentation_head." not in name):
        A_ : str = """mobilevit.""" + name
    return name

def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
    if base_model:
        A_ : Dict = """"""
    else:
        A_ : Any = """mobilevit."""
    for key in orig_state_dict.copy().keys():
        A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
        if key[:8] == "encoder.":
            A_ : int = key[8:]
        if "qkv" in key:
            A_ : Any = key.split(""".""")
            A_ : str = int(key_split[0][6:]) - 1
            A_ : int = int(key_split[3])
            A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
            A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            A_ : Optional[Any] = (
                F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
            )
            if "weight" in key:
                A_ : Dict = val[:dim, :]
                A_ : Optional[int] = val[dim : dim * 2, :]
                A_ : List[Any] = val[-dim:, :]
            else:
                A_ : Optional[Any] = val[:dim]
                A_ : List[Any] = val[dim : dim * 2]
                A_ : Any = val[-dim:]
        else:
            A_ : List[str] = val
    return orig_state_dict

def lowerCamelCase ( ):
    A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
    return im

@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
    A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
    # load original state_dict
    A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_"""):
        A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
    else:
        A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
    A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
    model.load_state_dict(lowerCamelCase)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
    A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
    A_ : List[Any] = model(**lowerCamelCase)
    A_ : Dict = outputs.logits
    if mobilevit_name.startswith("""deeplabv3_"""):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            A_ : int = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            A_ : Tuple = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            A_ : Tuple = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ])
        else:
            raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
        assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
        assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
    Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
    print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(lowerCamelCase)
    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(lowerCamelCase)
    if push_to_hub:
        A_ : str = {
            """mobilevit_s""": """mobilevit-small""",
            """mobilevit_xs""": """mobilevit-x-small""",
            """mobilevit_xxs""": """mobilevit-xx-small""",
            """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
            """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
            """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
        }
        print("""Pushing to the hub...""")
        A_ : Union[str, Any] = model_mapping[mobilevit_name]
        image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
        model.push_to_hub(lowerCamelCase , organization="""apple""")

if __name__ == "__main__":
    __magic_name__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--mobilevit_name',
        default='mobilevit_s',
        type=str,
        help=(
            'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
            ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
        ),
    )
    parser.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    __magic_name__ = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
27
1
'''simple docstring'''
__magic_name__ = 'Alexander Joslin'

import operator as op

from .stack import Stack

def lowerCamelCase ( lowerCamelCase : str):
    A_ : List[str] = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
    A_ : Stack[int] = Stack()
    A_ : Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(lowerCamelCase))
        elif i in operators:
            # RULE 2
            operator_stack.push(lowerCamelCase)
        elif i == ")":
            # RULE 4
            A_ : List[str] = operator_stack.peek()
            operator_stack.pop()
            A_ : List[Any] = operand_stack.peek()
            operand_stack.pop()
            A_ : List[Any] = operand_stack.peek()
            operand_stack.pop()
            A_ : Any = operators[opr](lowerCamelCase , lowerCamelCase)
            operand_stack.push(lowerCamelCase)
    # RULE 5
    return operand_stack.peek()

if __name__ == "__main__":
    __magic_name__ = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
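# Worked trace of the two-stack evaluation above (expression chosen for this
# note; the character-by-character scan assumes single-digit operands and a
# fully parenthesised expression): for '(3 + (2 * 5))' the inner ')' pops '*'
# with operands 2 and 5 and pushes 10, then the outer ')' pops '+' with
# operands 3 and 10, leaving 13 on the operand stack.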
27
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

__magic_name__ = logging.get_logger(__name__)

if is_vision_available():
    import PIL

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    a_ = ["""pixel_values"""]

    def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
        '''simple docstring'''
        super().__init__(**_a )
        A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
        A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
        A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
        A_ : Any = do_resize
        A_ : List[str] = size
        A_ : Union[str, Any] = resample
        A_ : Dict = do_center_crop
        A_ : List[str] = crop_size
        A_ : Any = do_rescale
        A_ : Union[str, Any] = rescale_factor
        A_ : Any = do_normalize
        A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
        A_ : Tuple = do_convert_rgb

    def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
        '''simple docstring'''
        A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
        return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )

    def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
        '''simple docstring'''
        A_ : Optional[int] = get_size_dict(_a )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )

    def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
        '''simple docstring'''
        return rescale(_a ,scale=_a ,data_format=_a ,**_a )

    def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
        '''simple docstring'''
        return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )

    def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
        '''simple docstring'''
        A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        A_ : Tuple = size if size is not None else self.size
        A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
        A_ : List[str] = resample if resample is not None else self.resample
        A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        A_ : Any = crop_size if crop_size is not None else self.crop_size
        A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
        A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
        A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        A_ : int = image_mean if image_mean is not None else self.image_mean
        A_ : int = image_std if image_std is not None else self.image_std
        A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        A_ : int = make_list_of_images(_a )
        if not valid_images(_a ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
        # All transformations expect numpy arrays.
        A_ : Dict = [to_numpy_array(_a ) for image in images]
        if do_resize:
            A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
        if do_center_crop:
            A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
        if do_rescale:
            A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
        if do_normalize:
            A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
        A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
        A_ : List[str] = {"""pixel_values""": images}
        return BatchFeature(data=_a ,tensor_type=_a )
27
1
'''simple docstring'''
import numpy as np

def lowerCamelCase ( lowerCamelCase : np.array):
    return (2 / (1 + np.exp(-2 * vector))) - 1

if __name__ == "__main__":
    import doctest

    doctest.testmod()
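    # Numeric sanity check of the identity above (input value chosen for this
    # example): 2 / (1 + exp(-2x)) - 1 equals tanh(x), so x = 1.0 maps to
    # about 0.76159 and x = 0.0 to exactly 0.0.
    print((2 / (1 + np.exp(-2 * 1.0))) - 1)  # ~0.7615941559557649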
27
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor

__magic_name__ = logging.get_logger(__name__)

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
        '''simple docstring'''
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" ,_a ,)
        super().__init__(*_a ,**_a )
27
1
'''simple docstring'''
from math import pi, sqrt, tan

def lowerCamelCase ( lowerCamelCase : float):
    if side_length < 0:
        raise ValueError("""surface_area_cube() only accepts non-negative values""")
    return 6 * side_length**2

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("""surface_area_cuboid() only accepts non-negative values""")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))

def lowerCamelCase ( lowerCamelCase : float):
    if radius < 0:
        raise ValueError("""surface_area_sphere() only accepts non-negative values""")
    return 4 * pi * radius**2

def lowerCamelCase ( lowerCamelCase : float):
    if radius < 0:
        raise ValueError("""surface_area_hemisphere() only accepts non-negative values""")
    return 3 * pi * radius**2

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cone() only accepts non-negative values""")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float):
    if radius_a < 0 or radius_a < 0 or height < 0:
        raise ValueError(
            """surface_area_conical_frustum() only accepts non-negative values""")
    A_ : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
    return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cylinder() only accepts non-negative values""")
    return 2 * pi * radius * (height + radius)

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("""surface_area_torus() only accepts non-negative values""")
    if torus_radius < tube_radius:
        raise ValueError(
            """surface_area_torus() does not support spindle or self intersecting tori""")
    return 4 * pow(lowerCamelCase , 2) * torus_radius * tube_radius

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if length < 0 or width < 0:
        raise ValueError("""area_rectangle() only accepts non-negative values""")
    return length * width

def lowerCamelCase ( lowerCamelCase : float):
    if side_length < 0:
        raise ValueError("""area_square() only accepts non-negative values""")
    return side_length**2

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if base < 0 or height < 0:
        raise ValueError("""area_triangle() only accepts non-negative values""")
    return (base * height) / 2

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float):
    if sidea < 0 or sidea < 0 or sidea < 0:
        raise ValueError("""area_triangle_three_sides() only accepts non-negative values""")
    elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
        raise ValueError("""Given three sides do not form a triangle""")
    A_ : List[str] = (sidea + sidea + sidea) / 2
    A_ : Optional[int] = sqrt(
        semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea))
    return area

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if base < 0 or height < 0:
        raise ValueError("""area_parallelogram() only accepts non-negative values""")
    return base * height

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float):
    if basea < 0 or basea < 0 or height < 0:
        raise ValueError("""area_trapezium() only accepts non-negative values""")
    return 1 / 2 * (basea + basea) * height

def lowerCamelCase ( lowerCamelCase : float):
    if radius < 0:
        raise ValueError("""area_circle() only accepts non-negative values""")
    return pi * radius**2

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("""area_ellipse() only accepts non-negative values""")
    return pi * radius_x * radius_y

def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float):
    if diagonal_a < 0 or diagonal_a < 0:
        raise ValueError("""area_rhombus() only accepts non-negative values""")
    return 1 / 2 * diagonal_a * diagonal_a

def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : float):
    if not isinstance(lowerCamelCase , lowerCamelCase) or sides < 3:
        raise ValueError(
            """area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""")
    elif length < 0:
        raise ValueError(
            """area_reg_polygon() only accepts non-negative values as \
length of a side""")
    return (sides * length**2) / (4 * tan(pi / sides))

if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print('[DEMO] Areas of various geometric shapes: \n')
    print(f"""Rectangle: {area_rectangle(10, 20) = }""")
    print(f"""Square: {area_square(10) = }""")
    print(f"""Triangle: {area_triangle(10, 10) = }""")
    print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
    print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
    print(f"""Rhombus: {area_rhombus(10, 20) = }""")
    print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
    print(f"""Circle: {area_circle(20) = }""")
    print(f"""Ellipse: {area_ellipse(10, 20) = }""")
    print('\nSurface Areas of various geometric shapes: \n')
    print(f"""Cube: {surface_area_cube(20) = }""")
    print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
    print(f"""Sphere: {surface_area_sphere(20) = }""")
    print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
    print(f"""Cone: {surface_area_cone(10, 20) = }""")
    print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
    print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
    print(f"""Torus: {surface_area_torus(20, 10) = }""")
    print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
    print(f"""Square: {area_reg_polygon(4, 10) = }""")
    print(f"""Regular Pentagon: {area_reg_polygon(5, 10) = }""")
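# Worked check of Heron's formula used in area_triangle_three_sides above
# (side lengths chosen for this note): the 5-12-13 right triangle has
# semi-perimeter s = (5 + 12 + 13) / 2 = 15, so the area is
# sqrt(15 * (15 - 5) * (15 - 12) * (15 - 13)) = sqrt(900) = 30, matching
# base * height / 2 = 5 * 12 / 2.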
27
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403

def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : complex , lowerCamelCase : str = "x" , lowerCamelCase : float = 10**-10 , lowerCamelCase : int = 1 , ):
    A_ : int = symbols(lowerCamelCase)
    A_ : List[Any] = lambdify(lowerCamelCase , lowerCamelCase)
    A_ : List[str] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase))
    A_ : str = starting_point
    while True:
        if diff_function(lowerCamelCase) != 0:
            A_ : int = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
                lowerCamelCase)
        else:
            raise ZeroDivisionError("""Could not find root""") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        A_ : Union[str, Any] = next_guess

# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
    )
    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
    )
    # Find root of cos(x)
    print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
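    # One more illustrative root (function and starting guess chosen for this
    # example): Newton's method on x**2 - 4 from x0 = 3 steps through
    # 3 -> 3 - 5/6 = 2.1667 -> 2.0064 -> ... and settles at 2.0.
    print(f"""The root of x**2 - 4 = 0 is {newton_raphson('x**2 - 4', 3)}""")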
27
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor

__magic_name__ = logging.get_logger(__name__)

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    def __init__( self : Any ,*_a : str ,**_a : int ):
        '''simple docstring'''
        warnings.warn(
            """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ImageGPTImageProcessor instead.""" ,_a ,)
        super().__init__(*_a ,**_a )
27
'''simple docstring'''
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset

__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}

class __lowerCAmelCase ( nn.Module ):
    '''simple docstring'''
    def __init__( self : Dict ,_a : Dict ):
        '''simple docstring'''
        super().__init__()
        A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a )
        A_ : int = list(model.children() )[:-2]
        A_ : int = nn.Sequential(*_a )
        A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )

    def _a ( self : str ,_a : Optional[int] ):
        '''simple docstring'''
        A_ : Tuple = self.pool(self.model(_a ) )
        A_ : Any = torch.flatten(_a ,start_dim=2 )
        A_ : str = out.transpose(1 ,2 ).contiguous()
        return out  # BxNx2048

class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
        '''simple docstring'''
        A_ : Dict = [json.loads(_a ) for l in open(_a )]
        A_ : Optional[int] = os.path.dirname(_a )
        A_ : Optional[Any] = tokenizer
        A_ : Optional[Any] = labels
        A_ : List[Any] = len(_a )
        A_ : str = max_seq_length
        A_ : str = transforms

    def __len__( self : str ):
        '''simple docstring'''
        return len(self.data )

    def __getitem__( self : Tuple ,_a : Optional[Any] ):
        '''simple docstring'''
        A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
        A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
        A_ : Optional[int] = sentence[: self.max_seq_length]
        A_ : Any = torch.zeros(self.n_classes )
        A_ : Tuple = 1
        A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
        A_ : Union[str, Any] = self.transforms(_a )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def _a ( self : List[Any] ):
        '''simple docstring'''
        A_ : str = Counter()
        for row in self.data:
            label_freqs.update(row["""label"""] )
        return label_freqs

def lowerCamelCase ( lowerCamelCase : str):
    A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
    A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
    A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
    A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
        A_ : str = input_row["""sentence"""]
        A_ : Tuple = 1
    A_ : int = torch.stack([row["""image"""] for row in batch])
    A_ : str = torch.stack([row["""label"""] for row in batch])
    A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
    A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor

def lowerCamelCase ( ):
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]

def lowerCamelCase ( ):
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
        ])
27
1
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
27
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
27
1
def nand_gate(input_1: int, input_2: int) -> int:
    # NAND is 1 whenever at least one input is 0
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
27
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __magic_name__ = trt.Logger(trt.Logger.WARNING) __magic_name__ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __magic_name__ = logging.getLogger(__name__) __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __magic_name__ = parser.parse_args() if args.tokenizer_name: __magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __magic_name__ = args.per_device_eval_batch_size __magic_name__ = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __magic_name__ = True __magic_name__ = 'temp_engine/bert-fp32.engine' if args.fpaa: __magic_name__ = 'temp_engine/bert-fp16.engine' if args.inta: __magic_name__ = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __magic_name__ = [network.get_input(i) for i in range(network.num_inputs)] __magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __magic_name__ = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __magic_name__ = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __magic_name__ = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]): A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa) A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa) A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase) # start time A_ : List[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Synchronize the stream and take time stream.synchronize() # end time A_ : str = time.time() A_ : 
Tuple = end_time - start_time A_ : Any = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __magic_name__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __magic_name__ = raw_datasets['validation'].column_names __magic_name__ = 'question' if 'question' in column_names else column_names[0] __magic_name__ = 'context' if 'context' in column_names else column_names[1] __magic_name__ = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __magic_name__ = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __magic_name__ = min(args.max_seq_length, tokenizer.model_max_length) def lowerCamelCase ( lowerCamelCase : Dict): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. 
A_ : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A_ : Union[str, Any] = [] for i in range(len(tokenized_examples["""input_ids"""])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase) A_ : Tuple = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A_ : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. A_ : Dict = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i]) ] return tokenized_examples __magic_name__ = raw_datasets['validation'] # Validation Feature Creation __magic_name__ = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __magic_name__ = default_data_collator __magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __magic_name__ = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. A_ : Tuple = postprocess_qa_predictions( examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: A_ : Dict = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase) __magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! 
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize # Allocate device memory for inputs and outputs. __magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) __magic_name__ = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __magic_name__ = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f""" Num examples = {len(eval_dataset)}""") logger.info(f""" Batch size = {args.per_device_eval_batch_size}""") __magic_name__ = 0.0 __magic_name__ = 0 __magic_name__ = timeit.default_timer() __magic_name__ = None for step, batch in enumerate(eval_dataloader): __magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __magic_name__ , __magic_name__ = outputs __magic_name__ = torch.tensor(start_logits) __magic_name__ = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __magic_name__ = nested_truncate(all_preds, len(eval_dataset)) __magic_name__ = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds) __magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"""Evaluation metrics: {eval_metric}""")
27
1
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif  those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
27
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
27
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
27
'''simple docstring''' import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_text_model""" def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,): '''simple docstring''' super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a ) A_ : Tuple = vocab_size A_ : int = hidden_size A_ : Optional[int] = intermediate_size A_ : Optional[int] = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : int = max_position_embeddings A_ : str = hidden_act A_ : Union[str, Any] = layer_norm_eps A_ : Tuple = attention_dropout A_ : Union[str, Any] = initializer_range A_ : List[Any] = initializer_factor @classmethod def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : int = cls.get_config_dict(_a ,**_a ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit_vision_model""" def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,): '''simple docstring''' super().__init__(**_a ) A_ : List[str] = hidden_size A_ : Union[str, Any] = intermediate_size A_ : Union[str, Any] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : int = num_channels A_ : str = image_size A_ : List[Any] = patch_size A_ : int = hidden_act A_ : List[Any] = layer_norm_eps A_ : List[str] = attention_dropout A_ : str = initializer_range A_ : str = initializer_factor @classmethod def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": A_ : List[str] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_a ,**_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """owlvit""" a_ = True def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,): '''simple docstring''' super().__init__(**_a ) if text_config is None: A_ : List[Any] = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: A_ : Tuple = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) A_ : Dict = OwlViTTextConfig(**_a ) A_ : Dict = OwlViTVisionConfig(**_a ) A_ : Any = projection_dim A_ : Optional[int] = logit_scale_init_value A_ : Optional[int] = return_dict A_ : Dict = 1.0 @classmethod def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ): '''simple docstring''' cls._set_token_in_kwargs(_a ) A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a ) if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_a ,**_a ) @classmethod def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ): '''simple docstring''' A_ : str = {} A_ : int = text_config A_ : Union[str, Any] = vision_config return cls.from_dict(_a ,**_a ) def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = copy.deepcopy(self.__dict__ ) A_ : str = self.text_config.to_dict() A_ : Optional[int] = self.vision_config.to_dict() A_ : List[Any] = self.__class__.model_type return output class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : int ): '''simple docstring''' return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _a ( self : str ): '''simple docstring''' return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _a ( self : Optional[Any] ): '''simple docstring''' return 1e-4 def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,): '''simple docstring''' A_ : Any = super().generate_dummy_inputs( processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a ) A_ : Any = super().generate_dummy_inputs( processor.image_processor ,batch_size=_a ,framework=_a ) return {**text_input_dict, **image_input_dict} @property def _a ( self : Optional[Any] ): '''simple docstring''' return 14
27
1
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    # Gaussian elimination with partial pivoting, then back substitution
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [[round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    # Fit a polynomial through the points (1, y_1), (2, y_2), ...
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
27
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __magic_name__ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""input_features""", """is_longer"""] def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,): '''simple docstring''' super().__init__( feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,) A_ : Tuple = top_db A_ : Tuple = truncation A_ : Optional[Any] = padding A_ : Optional[int] = fft_window_size A_ : Dict = (fft_window_size >> 1) + 1 A_ : Any = hop_length A_ : List[Any] = max_length_s A_ : Tuple = max_length_s * sampling_rate A_ : Tuple = sampling_rate A_ : Optional[int] = frequency_min A_ : Tuple = frequency_max A_ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,) A_ : Dict = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,) def _a ( self : int ): '''simple docstring''' A_ : int = copy.deepcopy(self.__dict__ ) A_ : Tuple = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ): '''simple docstring''' A_ : List[str] = spectrogram( _a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,) return log_mel_spectrogram.T def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A_ : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A_ : int = [0] # randomly choose index for each part A_ : List[str] = np.random.choice(ranges[0] ) A_ : int = np.random.choice(ranges[1] ) A_ : Optional[int] = np.random.choice(ranges[2] ) A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :] A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :] A_ : Dict = mel[idx_back : idx_back + chunk_frames, :] A_ : Optional[int] = torch.tensor(mel[None, None, :] ) A_ : Dict = torch.nn.functional.interpolate( _a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a ) A_ : str = mel_shrink[0][0].numpy() A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": A_ : Dict = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A_ : Tuple = 
len(_a ) - max_length A_ : Optional[int] = np.random.randint(0 ,overflow + 1 ) A_ : List[Any] = waveform[idx : idx + max_length] A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A_ : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 ) A_ : str = False else: A_ : str = self._random_mel_fusion(_a ,_a ,_a ) A_ : Optional[Any] = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented' ) else: A_ : Optional[int] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A_ : int = int(max_length / len(_a ) ) A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A_ : List[str] = int(max_length / len(_a ) ) A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) ) A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,): '''simple docstring''' A_ : List[str] = truncation if truncation is not None else self.truncation A_ : List[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) A_ : int = is_batched_numpy or ( isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_a ,np.ndarray ): A_ : str = np.asarray(_a ,dtype=np.floataa ) elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Any = [np.asarray(_a )] # convert to mel spectrogram, truncate and pad if needed. 
A_ : str = [ self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a ) for waveform in raw_speech ] A_ : int = [] A_ : Any = [] for mel, longer in padded_inputs: input_mel.append(_a ) is_longer.append(_a ) if truncation == "fusion" and sum(_a ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A_ : List[Any] = np.random.randint(0 ,len(_a ) ) A_ : List[str] = True if isinstance(input_mel[0] ,_a ): A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A_ : List[str] = [[longer] for longer in is_longer] A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} A_ : int = BatchFeature(_a ) if return_tensors is not None: A_ : int = input_features.convert_to_tensors(_a ) return input_features
27
1
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json', } # fmt: off __magic_name__ = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377, 1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211, 4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786, 11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791, 17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409, 34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361 ] __magic_name__ = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627, 3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647, 7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793, 14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675, 22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865, 42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362 ] class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """whisper""" a_ = ["""past_key_values"""] a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : str ,_a : List[str]=51865 ,_a : Optional[Any]=80 ,_a : List[Any]=6 ,_a : int=4 ,_a : Optional[int]=6 ,_a : int=4 ,_a : Optional[Any]=1536 ,_a : Union[str, Any]=1536 ,_a : Union[str, Any]=0.0 ,_a : List[Any]=0.0 ,_a : Tuple=50257 ,_a : Optional[int]=True ,_a : Dict=True ,_a : int="gelu" ,_a : Dict=256 ,_a : Optional[Any]=0.0 ,_a : Optional[Any]=0.0 ,_a : Union[str, Any]=0.0 ,_a : Optional[Any]=0.02 ,_a : Dict=False ,_a : Tuple=1500 ,_a : Dict=448 ,_a : Tuple=50256 ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : Optional[int]=None ,_a : Tuple=[220, 50256] ,_a : Dict=False ,_a : List[str]=256 ,_a : Optional[int]=False ,_a : int=0.05 ,_a : Any=10 ,_a : int=2 ,_a : int=0.0 ,_a : Union[str, Any]=10 ,_a : Optional[int]=0 ,_a : int=7 ,**_a : Optional[int] ,): '''simple docstring''' A_ : Union[str, Any] = vocab_size A_ : List[str] = num_mel_bins A_ : Tuple = d_model A_ : List[str] = encoder_layers A_ : List[Any] = encoder_attention_heads A_ : Dict = decoder_layers A_ : Any = decoder_attention_heads A_ : Union[str, Any] = decoder_ffn_dim A_ : Optional[Any] = encoder_ffn_dim A_ : Union[str, Any] = dropout A_ : Tuple = attention_dropout A_ : Tuple = activation_dropout A_ : str = activation_function A_ : Union[str, Any] = init_std A_ : Optional[Any] = encoder_layerdrop A_ : List[Any] = decoder_layerdrop A_ : Tuple = use_cache A_ : Optional[Any] = encoder_layers A_ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[int] = max_source_positions A_ : str = max_target_positions # Audio Classification-specific 
parameters. Feel free to ignore for other classes. A_ : Tuple = classifier_proj_size A_ : Tuple = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : Any = apply_spec_augment A_ : Optional[int] = mask_time_prob A_ : Optional[int] = mask_time_length A_ : Optional[int] = mask_time_min_masks A_ : Optional[Any] = mask_feature_prob A_ : Optional[Any] = mask_feature_length A_ : Any = mask_feature_min_masks A_ : List[str] = median_filter_width super().__init__( pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,decoder_start_token_id=_a ,suppress_tokens=_a ,begin_suppress_tokens=_a ,**_a ,) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Optional[int] = OrderedDict( [ ("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}), ] ) if self.use_past: A_ : Optional[int] = {0: """batch"""} else: A_ : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(_a ,direction="""inputs""" ) return common_inputs def _a ( self : Dict ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 22050 ,_a : float = 5.0 ,_a : int = 220 ,): '''simple docstring''' A_ : int = OrderedDict() A_ : Dict = OnnxConfig.generate_dummy_inputs( self ,preprocessor=preprocessor.feature_extractor ,batch_size=_a ,framework=_a ,sampling_rate=_a ,time_duration=_a ,frequency=_a ,) A_ : int = encoder_inputs["""input_features"""].shape[2] A_ : Union[str, Any] = encoder_sequence_length // 2 if self.use_past else seq_length A_ : int = super().generate_dummy_inputs( preprocessor.tokenizer ,_a ,_a ,_a ,_a ) A_ : Optional[Any] = encoder_inputs.pop("""input_features""" ) A_ : Dict = decoder_inputs.pop("""decoder_input_ids""" ) if "past_key_values" in decoder_inputs: A_ : List[Any] = decoder_inputs.pop("""past_key_values""" ) return dummy_inputs @property def _a ( self : Any ): '''simple docstring''' return 1e-3
27
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,): '''simple docstring''' A_ : Optional[Any] = parent A_ : str = batch_size A_ : int = seq_length A_ : Union[str, Any] = is_training A_ : Optional[Any] = use_token_type_ids A_ : int = use_labels A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : Tuple = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : int = intermediate_size A_ : Tuple = hidden_act A_ : int = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : Tuple = type_sequence_label_size A_ : int = initializer_range A_ : Optional[Any] = num_labels A_ : str = num_choices A_ : Optional[Any] = scope A_ : List[Any] = self.vocab_size - 1 def _a ( self : Any ): '''simple docstring''' A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : List[Any] = None if self.use_token_type_ids: A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : int = None A_ : str = None A_ : Union[str, Any] = None if self.use_labels: A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Any = ids_tensor([self.batch_size] ,self.num_choices ) A_ : List[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ): '''simple docstring''' A_ : Optional[Any] = OpenAIGPTModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a ) A_ : str = model(_a ,token_type_ids=_a ) A_ : Dict = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ): '''simple docstring''' A_ : str = OpenAIGPTLMHeadModel(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) 
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ): '''simple docstring''' A_ : Any = OpenAIGPTDoubleHeadsModel(_a ) model.to(_a ) model.eval() A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ): '''simple docstring''' A_ : List[str] = self.num_labels A_ : int = OpenAIGPTForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : str = config_and_inputs A_ : int = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) a_ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly a_ = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ): '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ): '''simple docstring''' A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A_ : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,) A_ : Any = inputs_dict["""labels"""] A_ : Any = inputs_dict["""labels"""] A_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,) A_ : int = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=_a ) return inputs_dict def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Tuple = OpenAIGPTModelTester(self ) A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 ) def _a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*_a ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a ) @slow def _a ( self : List[Any] ): '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def _a ( self : List[str] ): '''simple docstring''' A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(_a ) A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is A_ : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A_ : int = model.generate(_a ,do_sample=_a ) self.assertListEqual(output_ids[0].tolist() ,_a )
27
1
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __magic_name__ = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""input_features""", """is_longer"""] def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,): '''simple docstring''' super().__init__( feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,) A_ : Tuple = top_db A_ : Tuple = truncation A_ : Optional[Any] = padding A_ : Optional[int] = fft_window_size A_ : Dict = (fft_window_size >> 1) + 1 A_ : Any = hop_length A_ : List[Any] = max_length_s A_ : Tuple = max_length_s * sampling_rate A_ : Tuple = sampling_rate A_ : Optional[int] = frequency_min A_ : Tuple = frequency_max A_ : Tuple = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,) A_ : Dict = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,) def _a ( self : int ): '''simple docstring''' A_ : int = copy.deepcopy(self.__dict__ ) A_ : Tuple = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ): '''simple docstring''' A_ : List[str] = spectrogram( _a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,) return log_mel_spectrogram.T def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk A_ : List[Any] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk A_ : int = [0] # randomly choose index for each part A_ : List[str] = np.random.choice(ranges[0] ) A_ : int = np.random.choice(ranges[1] ) A_ : Optional[int] = np.random.choice(ranges[2] ) A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :] A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :] A_ : Dict = mel[idx_back : idx_back + chunk_frames, :] A_ : Optional[int] = torch.tensor(mel[None, None, :] ) A_ : Dict = torch.nn.functional.interpolate( _a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a ) A_ : str = mel_shrink[0][0].numpy() A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 ) return mel_fusion def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": A_ : Dict = True # random crop to max_length (for compatibility) -> this should be handled by self.pad A_ : Tuple = 
len(_a ) - max_length A_ : Optional[int] = np.random.randint(0 ,overflow + 1 ) A_ : List[Any] = waveform[idx : idx + max_length] A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] elif truncation == "fusion": A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed A_ : Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 ) A_ : str = False else: A_ : str = self._random_mel_fusion(_a ,_a ,_a ) A_ : Optional[Any] = True else: raise NotImplementedError(f'data_truncating {truncation} not implemented' ) else: A_ : Optional[int] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": A_ : int = int(max_length / len(_a ) ) A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length] if padding == "repeatpad": A_ : List[str] = int(max_length / len(_a ) ) A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) ) A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 ) if truncation == "fusion": A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters ) A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 ) else: A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,): '''simple docstring''' A_ : List[str] = truncation if truncation is not None else self.truncation A_ : List[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a' f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input' f' was sampled with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) A_ : int = is_batched_numpy or ( isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_a ,np.ndarray ): A_ : str = np.asarray(_a ,dtype=np.floataa ) elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A_ : Tuple = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A_ : Any = [np.asarray(_a )] # convert to mel spectrogram, truncate and pad if needed. 
A_ : str = [ self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a ) for waveform in raw_speech ] A_ : int = [] A_ : Any = [] for mel, longer in padded_inputs: input_mel.append(_a ) is_longer.append(_a ) if truncation == "fusion" and sum(_a ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer A_ : List[Any] = np.random.randint(0 ,len(_a ) ) A_ : List[str] = True if isinstance(input_mel[0] ,_a ): A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool A_ : List[str] = [[longer] for longer in is_longer] A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} A_ : int = BatchFeature(_a ) if return_tensors is not None: A_ : int = input_features.convert_to_tensors(_a ) return input_features
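The extractor above implements CLAP-style "fusion" truncation: a clip longer than the model window contributes a bilinearly shrunken copy of its full mel spectrogram plus one fixed-length chunk sampled from the front, middle, and back thirds. A minimal numpy-only sketch of the chunk-selection step (the function name and shapes are illustrative; the torch bilinear shrink is omitted):

import numpy as np

def random_mel_chunks(mel, chunk_frames, seed=0):
    # mel: (total_frames, n_mels); sample one chunk start from each third of the clip
    rng = np.random.default_rng(seed)
    starts_pool = np.array_split(np.arange(mel.shape[0] - chunk_frames + 1), 3)
    starts = [int(rng.choice(r)) if len(r) else 0 for r in starts_pool]
    return np.stack([mel[s : s + chunk_frames] for s in starts], axis=0)

mel = np.random.rand(1001, 64).astype(np.float32)  # toy spectrogram values
print(random_mel_chunks(mel, chunk_frames=480).shape)  # (3, 480, 64)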
27
'''simple docstring''' import baseaa def lowerCamelCase ( lowerCamelCase : str): return baseaa.aaaencode(lowerCamelCase.encode("""utf-8""")) def lowerCamelCase ( lowerCamelCase : bytes): return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""") if __name__ == "__main__": import doctest doctest.testmod()
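For reference, these helpers look like thin wrappers over the standard library's Base64 codec (reading baseaa.aaaencode/aaadecode as renamed base64.b64encode/b64decode is an assumption based on the call pattern). A stdlib round trip:

import base64

encoded = base64.b64encode("hello, world".encode("utf-8"))  # str -> bytes -> Base64 bytes
decoded = base64.b64decode(encoded).decode("utf-8")         # Base64 bytes -> bytes -> str
assert decoded == "hello, world"
print(encoded)  # b'aGVsbG8sIHdvcmxk'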
27
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """ViltImageProcessor""" a_ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ): '''simple docstring''' A_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a ,_a ) A_ : Optional[Any] = self.image_processor def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,): '''simple docstring''' A_ : int = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) # add pixel_values + pixel_mask A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a ) encoding.update(_a ) return encoding def _a ( self : List[Any] ,*_a : Any ,**_a : Any ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : int ,*_a : int ,**_a : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = self.tokenizer.model_input_names A_ : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a ( self : str ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,) return self.image_processor_class @property def _a ( self : int ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,) return self.image_processor
27
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowerCamelCase ( lowerCamelCase : Optional[Any]): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def lowerCamelCase ( lowerCamelCase : str): # word like '180' or '身高' or '神' for char in word: A_ : Optional[Any] = ord(lowerCamelCase) if not _is_chinese_char(lowerCamelCase): return 0 return 1 def lowerCamelCase ( lowerCamelCase : List[str]): A_ : Any = set() for token in tokens: A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase) if chinese_word: word_set.add(lowerCamelCase) A_ : Any = list(lowerCamelCase) return word_list def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()): if not chinese_word_set: return bert_tokens A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set]) A_ : str = bert_tokens A_ , A_ : Any = 0, len(lowerCamelCase) while start < end: A_ : Tuple = True if is_chinese(bert_word[start]): A_ : List[str] = min(end - start , lowerCamelCase) for i in range(lowerCamelCase , 1 , -1): A_ : Tuple = """""".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1 , start + i): A_ : Dict = """##""" + bert_word[j] A_ : str = start + i A_ : Dict = False break if single_word: start += 1 return bert_word def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer): A_ : Union[str, Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws A_ : int = [get_chinese_word(lowerCamelCase) for r in res] ltp_res.extend(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : List[Any] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512) bert_res.extend(res["""input_ids"""]) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Union[str, Any] = [] for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase): A_ : List[Any] = [] for id in input_ids: A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase) input_tokens.append(lowerCamelCase) A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase) A_ : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCamelCase): if token[:2] == "##": A_ : Optional[Any] = token[2:] # save chinese tokens' pos if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)): ref_id.append(lowerCamelCase) ref_ids.append(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) return ref_ids def lowerCamelCase ( lowerCamelCase : Tuple): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(lowerCamelCase.file_name , """r""" , encoding="""utf-8""") as f: A_ : Optional[int] = f.readlines() A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ : Optional[Any] = LTP(lowerCamelCase.ltp) # faster in GPU device A_ : Dict = BertTokenizer.from_pretrained(lowerCamelCase.bert) A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase) with open(lowerCamelCase.save_path , """w""" , encoding="""utf-8""") as f: A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids] f.writelines(lowerCamelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref') __magic_name__.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) __magic_name__.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) __magic_name__.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) __magic_name__.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) __magic_name__ = __magic_name__.parse_args() lowerCamelCase(__magic_name__)
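The script above builds whole-word-masking references: LTP segments each line into Chinese words, and single-character WordPiece tokens that continue one of those words are re-marked with the ## prefix so their positions can be recorded. A self-contained sketch of that re-marking step (mark_subwords and the toy inputs are illustrative; the Chinese-character check of the original is omitted):

from typing import List, Set

def mark_subwords(tokens: List[str], words: Set[str]) -> List[str]:
    # Greedily match the longest known word starting at each position and
    # prefix its continuation pieces with '##' (WordPiece convention).
    out, start, max_len = list(tokens), 0, max((len(w) for w in words), default=0)
    while start < len(out):
        matched = False
        for length in range(min(max_len, len(out) - start), 1, -1):
            if "".join(out[start : start + length]) in words:
                for j in range(start + 1, start + length):
                    out[j] = "##" + out[j]
                start += length
                matched = True
                break
        if not matched:
            start += 1
    return out

print(mark_subwords(["身", "高", "1", "8", "0"], {"身高"}))  # ['身', '##高', '1', '8', '0']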
27
1
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def lowerCamelCase ( lowerCamelCase : dict): return (lowerCamelCase["data"], lowerCamelCase["target"]) def lowerCamelCase ( lowerCamelCase : np.ndarray , lowerCamelCase : np.ndarray , lowerCamelCase : np.ndarray): A_ : int = XGBRegressor(verbosity=0 , random_state=42) xgb.fit(lowerCamelCase , lowerCamelCase) # Predict target for test data A_ : Union[str, Any] = xgb.predict(lowerCamelCase) A_ : int = predictions.reshape(len(lowerCamelCase) , 1) return predictions def lowerCamelCase ( ): A_ : Union[str, Any] = fetch_california_housing() A_ , A_ : List[str] = data_handling(lowerCamelCase) A_ , A_ , A_ , A_ : List[Any] = train_test_split( lowerCamelCase , lowerCamelCase , test_size=0.25 , random_state=1) A_ : Any = xgboost(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Error printing print(F'Mean Absolute Error : {mean_absolute_error(lowerCamelCase , lowerCamelCase)}') print(F'Mean Square Error : {mean_squared_error(lowerCamelCase , lowerCamelCase)}') if __name__ == "__main__": import doctest doctest.testmod(verbose=True) lowerCamelCase()
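The same train/evaluate flow in miniature, on synthetic data so it runs without downloading the California-housing set (the feature dimension and noise level here are made up for illustration):

import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

rng = np.random.default_rng(42)
X = rng.normal(size=(500, 8))                                  # 500 samples, 8 features
y = X @ rng.normal(size=8) + rng.normal(scale=0.1, size=500)   # noisy linear target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=1)
model = XGBRegressor(verbosity=0, random_state=42)
model.fit(X_train, y_train)
print("MAE:", mean_absolute_error(y_test, model.predict(X_test)))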
27
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """ViltImageProcessor""" a_ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ): '''simple docstring''' A_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : List[str] = kwargs.pop("""feature_extractor""" ) A_ : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a ,_a ) A_ : Optional[Any] = self.image_processor def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,): '''simple docstring''' A_ : int = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) # add pixel_values + pixel_mask A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a ) encoding.update(_a ) return encoding def _a ( self : List[Any] ,*_a : Any ,**_a : Any ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : int ,*_a : int ,**_a : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = self.tokenizer.model_input_names A_ : str = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a ( self : str ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,) return self.image_processor_class @property def _a ( self : int ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,) return self.image_processor
27
1
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __magic_name__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): '''simple docstring''' a_ = None def lowerCamelCase ( lowerCamelCase : "pyspark.sql.DataFrame" , lowerCamelCase : List[int] , ): import pyspark def generate_fn(): A_ : Tuple = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""")) for partition_id in partition_order: A_ : List[str] = df_with_partition_id.select("""*""").where(F'part_id = {partition_id}').drop("""part_id""") A_ : List[Any] = partition_df.collect() A_ : List[Any] = 0 for row in rows: yield F'{partition_id}_{row_id}', row.asDict() row_id += 1 return generate_fn class __lowerCAmelCase ( _BaseExamplesIterable ): '''simple docstring''' def __init__( self : Optional[Any] ,_a : "pyspark.sql.DataFrame" ,_a : Union[str, Any]=None ,): '''simple docstring''' A_ : Optional[int] = df A_ : Any = partition_order or range(self.df.rdd.getNumPartitions() ) A_ : Tuple = _generate_iterable_examples(self.df ,self.partition_order ) def __iter__( self : Optional[Any] ): '''simple docstring''' yield from self.generate_examples_fn() def _a ( self : Optional[int] ,_a : np.random.Generator ): '''simple docstring''' A_ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_a ) return SparkExamplesIterable(self.df ,partition_order=_a ) def _a ( self : Optional[Any] ,_a : int ,_a : int ): '''simple docstring''' A_ : Optional[Any] = self.split_shard_indices_by_worker(_a ,_a ) return SparkExamplesIterable(self.df ,partition_order=_a ) @property def _a ( self : Tuple ): '''simple docstring''' return len(self.partition_order ) class __lowerCAmelCase ( datasets.DatasetBuilder ): '''simple docstring''' a_ = SparkConfig def __init__( self : Any ,_a : "pyspark.sql.DataFrame" ,_a : str = None ,_a : str = None ,**_a : Optional[int] ,): '''simple docstring''' import pyspark A_ : int = pyspark.sql.SparkSession.builder.getOrCreate() A_ : Optional[int] = df A_ : List[Any] = working_dir super().__init__( cache_dir=_a ,config_name=str(self.df.semanticHash() ) ,**_a ,) def _a ( self : int ): '''simple docstring''' def create_cache_and_write_probe(_a : Any ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir ,exist_ok=_a ) A_ : Optional[Any] = os.path.join(self._cache_dir ,"""fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_a ,"""a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" ,"""""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: A_ : Any = ( self._spark.sparkContext.parallelize(range(1 ) ,1 ).mapPartitions(_a ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def _a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def _a ( self : List[Any] ,_a : datasets.download.download_manager.DownloadManager ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def _a ( self : List[Any] ,_a : List[Any] ): '''simple docstring''' import pyspark def get_arrow_batch_size(_a : int ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) A_ : int = self.df.count() A_ : str = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. A_ : Optional[Any] = ( self.df.limit(_a ) .repartition(1 ) .mapInArrow(_a ,"""batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) A_ : Optional[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. A_ : List[str] = min(_a ,int(approx_total_size / max_shard_size ) ) A_ : Optional[Any] = self.df.repartition(_a ) def _a ( self : int ,_a : str ,_a : str ,_a : int ,): '''simple docstring''' import pyspark A_ : List[Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter A_ : Optional[Any] = os.path.join(self._working_dir ,os.path.basename(_a ) ) if self._working_dir else fpath A_ : int = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. A_ : Dict = self.config.features A_ : Optional[Any] = self._writer_batch_size A_ : Any = self._fs.storage_options def write_arrow(_a : Union[str, Any] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. A_ : str = pyspark.TaskContext().taskAttemptId() A_ : Tuple = next(_a ,_a ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) A_ : Optional[int] = 0 A_ : Any = writer_class( features=_a ,path=working_fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,writer_batch_size=_a ,storage_options=_a ,embed_local_files=_a ,) A_ : Union[str, Any] = pa.Table.from_batches([first_batch] ) writer.write_table(_a ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: A_ , A_ : Optional[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) shard_id += 1 A_ : Dict = writer_class( features=writer._features ,path=working_fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,writer_batch_size=_a ,storage_options=_a ,embed_local_files=_a ,) A_ : List[str] = pa.Table.from_batches([batch] ) writer.write_table(_a ) if writer._num_bytes > 0: A_ , A_ : Optional[int] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] ,names=["""task_id""", """num_examples""", """num_bytes"""] ,) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_a ) ): A_ : List[str] = os.path.join(os.path.dirname(_a ) ,os.path.basename(_a ) ) shutil.move(_a ,_a ) A_ : Dict = ( self.df.mapInArrow(_a ,"""task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) ,pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) ,pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) ,pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) ,) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _a ( self : Union[str, Any] ,_a : "datasets.SplitGenerator" ,_a : str = "arrow" ,_a : Optional[Union[str, int]] = None ,_a : Optional[int] = None ,**_a : int ,): '''simple docstring''' self._validate_cache_dir() A_ : Any = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_a ) A_ : List[str] = not is_remote_filesystem(self._fs ) A_ : List[str] = os.path.join if is_local else posixpath.join A_ : List[str] = """-TTTTT-SSSSS-of-NNNNN""" A_ : List[str] = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}' A_ : Tuple = path_join(self._output_dir ,_a ) A_ : Dict = 0 A_ : Optional[Any] = 0 A_ : Optional[Any] = 0 A_ : Any = [] A_ : str = [] for task_id, content in self._prepare_split_single(_a ,_a ,_a ): ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Optional[Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_a ) A_ : Any = total_num_examples A_ : Any = total_num_bytes # should rename everything at the end logger.debug(f'Renaming {total_shards} shards.' ) if total_shards > 1: A_ : str = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
A_ : Optional[int] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( _a : int ,_a : int ,_a : int ,): rename( _a ,fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,fpath.replace("""TTTTT-SSSSS""" ,f'{global_shard_id:05d}' ).replace("""NNNNN""" ,f'{total_shards:05d}' ) ,) A_ : int = [] A_ : Union[str, Any] = 0 for i in range(len(_a ) ): A_ , A_ : Any = task_id_and_num_shards[i] for shard_id in range(_a ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_a ,len(_a ) ).map(lambda _a : _rename_shard(*_a ) ).collect() else: # don't use any pattern A_ : str = 0 A_ : Union[str, Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" ,f'{shard_id:05d}' ).replace("""TTTTT""" ,f'{task_id:05d}' ) ,fpath.replace(_a ,"""""" ) ,) def _a ( self : Optional[int] ,_a : "datasets.SplitGenerator" ,): '''simple docstring''' return SparkExamplesIterable(self.df )
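The Spark builder writes each task's shards under a NAME-TTTTT-SSSSS-of-NNNNN template and then renames them so shard indices become global across tasks. The renaming itself is plain string templating; a minimal sketch of the two substitutions (the path is illustrative):

fpath = "dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"

def task_shard_path(task_id, shard_id):
    # per-task path, as written by each Spark task
    return fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")

def global_shard_path(global_shard_id, total_shards):
    # final path, once all task shard counts are known
    return fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")

print(task_shard_path(3, 1))     # dataset-train-00003-00001-of-NNNNN.arrow
print(global_shard_path(7, 12))  # dataset-train-00007-of-00012.arrow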
27
'''simple docstring''' from ..utils import DummyObject, requires_backends class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""torch""", """torchsde"""] def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ): '''simple docstring''' requires_backends(self ,["""torch""", """torchsde"""] ) @classmethod def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ): '''simple docstring''' requires_backends(cls ,["""torch""", """torchsde"""] ) @classmethod def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ): '''simple docstring''' requires_backends(cls ,["""torch""", """torchsde"""] )
27
1
'''simple docstring''' import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """BlipImageProcessor""" a_ = """AutoTokenizer""" def __init__( self : Optional[Any] ,_a : List[Any] ,_a : Union[str, Any] ,_a : Any ): '''simple docstring''' super().__init__(_a ,_a ) # add QFormer tokenizer A_ : Optional[int] = qformer_tokenizer def __call__( self : str ,_a : ImageInput = None ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Optional[int] ,): '''simple docstring''' if images is None and text is None: raise ValueError("""You have to specify at least images or text.""" ) A_ : Any = BatchFeature() if text is not None: A_ : int = self.tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) encoding.update(_a ) A_ : List[Any] = self.qformer_tokenizer( text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) A_ : List[Any] = qformer_text_encoding.pop("""input_ids""" ) A_ : Union[str, Any] = qformer_text_encoding.pop("""attention_mask""" ) if images is not None: A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a ) encoding.update(_a ) return encoding def _a ( self : List[Any] ,*_a : Dict ,**_a : Union[str, Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : Dict ,*_a : List[Any] ,**_a : Optional[int] ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : int = self.tokenizer.model_input_names A_ : List[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _a ( self : Any ,_a : Tuple ,**_a : Union[str, Any] ): '''simple docstring''' if os.path.isfile(_a ): raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' ) os.makedirs(_a ,exist_ok=_a ) A_ : int = os.path.join(_a ,"""qformer_tokenizer""" ) self.qformer_tokenizer.save_pretrained(_a ) return super().save_pretrained(_a ,**_a ) @classmethod def _a ( cls : Dict ,_a : Dict ,**_a : int ): '''simple docstring''' A_ : Tuple = AutoTokenizer.from_pretrained(_a ,subfolder="""qformer_tokenizer""" ) 
A_ : List[Any] = cls._get_arguments_from_pretrained(_a ,**_a ) args.append(_a ) return cls(*_a )
27
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"): A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {} A_ : Optional[int] = padding_side return tokenizer( [line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , ) def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ): A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,): '''simple docstring''' super().__init__() A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" ) A_ : Any = Path(_a ).joinpath(type_path + """.target""" ) A_ : Dict = self.get_char_lens(self.src_file ) A_ : Optional[int] = max_source_length A_ : List[str] = max_target_length assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}' A_ : List[Any] = tokenizer A_ : Optional[Any] = prefix if n_obs is not None: A_ : Any = self.src_lens[:n_obs] A_ : Optional[int] = src_lang A_ : Tuple = tgt_lang def __len__( self : Tuple ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : List[str] ,_a : Tuple ): '''simple docstring''' A_ : int = index + 1 # linecache starts at 1 A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" ) A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" ) assert source_line, f'empty source line for index {index}' assert tgt_line, f'empty tgt line for index {index}' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_a ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right A_ : List[str] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer ) A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" ) A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" ) A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze() A_ : Dict = target_inputs["""input_ids"""].squeeze() A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _a ( _a : int ): '''simple docstring''' return [len(_a ) for x in Path(_a ).open().readlines()] def _a ( self : 
Optional[int] ,_a : Dict ): '''simple docstring''' A_ : str = torch.stack([x["""input_ids"""] for x in batch] ) A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] ) A_ : Union[str, Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : str = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_a ) else self.tokenizer.pad_token_id ) A_ : List[str] = trim_batch(_a ,_a ) A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a ) A_ : List[str] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __magic_name__ = getLogger(__name__) def lowerCamelCase ( lowerCamelCase : List[List]): return list(itertools.chain.from_iterable(lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : str): A_ : Union[str, Any] = get_git_info() save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json""")) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]): with open(lowerCamelCase , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase) def lowerCamelCase ( lowerCamelCase : Any): with open(lowerCamelCase) as f: return json.load(lowerCamelCase) def lowerCamelCase ( ): A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase) A_ : Union[str, Any] = { """repo_id""": str(lowerCamelCase), """repo_sha""": str(repo.head.object.hexsha), """repo_branch""": str(repo.active_branch), """hostname""": str(socket.gethostname()), } return repo_infos def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable): return list(map(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]): with open(lowerCamelCase , """wb""") as f: return pickle.dump(lowerCamelCase , lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str]): def remove_articles(lowerCamelCase : Any): return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase) def white_space_fix(lowerCamelCase : List[Any]): return " ".join(text.split()) def remove_punc(lowerCamelCase : Union[str, Any]): A_ : Optional[int] = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(lowerCamelCase : List[str]): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase)))) def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int): A_ : Tuple = normalize_answer(lowerCamelCase).split() A_ : Dict = normalize_answer(lowerCamelCase).split() A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase) A_ : Any = sum(common.values()) if num_same == 0: return 0 A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = 1.0 * num_same / len(lowerCamelCase) A_ : Any = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any): return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase) def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]): assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Any = 0 for hypo, pred in zip(lowerCamelCase , lowerCamelCase): em += exact_match_score(lowerCamelCase , lowerCamelCase) if len(lowerCamelCase) > 0: em /= len(lowerCamelCase) return {"em": em} def lowerCamelCase ( lowerCamelCase : Union[str, Any]): return 
model_prefix.startswith("""rag""") def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]): A_ : Optional[Any] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead A_ : Tuple = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase): if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) continue A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p] setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase)) delattr(lowerCamelCase , lowerCamelCase) return hparams, config
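The SQuAD-style metrics above reduce to token overlap after normalization; a worked example of the F1 step (standalone, mirroring the same Counter-based counting, with the article/punctuation stripping omitted):

from collections import Counter

def token_f1(prediction, gold):
    pred_tokens, gold_tokens = prediction.lower().split(), gold.lower().split()
    num_same = sum((Counter(pred_tokens) & Counter(gold_tokens)).values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

print(token_f1("the cat sat", "a cat sat down"))  # 2 shared tokens: p=2/3, r=2/4 -> f1=4/7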
27
1
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int): # prepare kernel # the kernel size has to be odd if (ksize % 2) == 0: A_ : Union[str, Any] = ksize + 1 A_ : Union[str, Any] = np.zeros((ksize, ksize) , dtype=np.floataa) # each value for y in range(lowerCamelCase): for x in range(lowerCamelCase): # distance from center A_ : Optional[int] = x - ksize // 2 A_ : Any = y - ksize // 2 # degrees to radians A_ : Dict = theta / 180 * np.pi A_ : int = np.cos(_theta) A_ : List[Any] = np.sin(_theta) # get kernel x A_ : Dict = cos_theta * px + sin_theta * py # get kernel y A_ : Tuple = -sin_theta * px + cos_theta * py # fill kernel A_ : List[Any] = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image __magic_name__ = imread('../image_data/lena.jpg') # turn image into gray scale __magic_name__ = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple kernels to detect edges __magic_name__ = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: __magic_name__ = lowerCamelCase(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) __magic_name__ = out / out.max() * 255 __magic_name__ = out.astype(np.uinta) imshow('Original', gray) imshow('Gabor filter with 20x20 mask and 6 directions', out) waitKey(0)
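Since the kernel is pure numpy, its geometry can be sanity-checked without OpenCV. A compact, vectorized restatement of the same formula (meshgrid instead of the double loop; purely illustrative) verifies that rotating theta by 90 degrees transposes the kernel:

import numpy as np

def gabor_kernel(ksize, sigma, theta_deg, lambd, gamma, psi):
    ksize += 1 - ksize % 2  # force an odd size, as the loop version does
    half = ksize // 2
    y, x = np.mgrid[-half : half + 1, -half : half + 1]
    t = np.deg2rad(theta_deg)
    xr = np.cos(t) * x + np.sin(t) * y   # rotated coordinates
    yr = -np.sin(t) * x + np.cos(t) * y
    return np.exp(-(xr**2 + gamma**2 * yr**2) / (2 * sigma**2)) * np.cos(2 * np.pi * xr / lambd + psi)

k0 = gabor_kernel(10, 8, 0, 10, 1, 0)
k90 = gabor_kernel(10, 8, 90, 10, 1, 0)
print(k0.shape)                           # (11, 11)
assert np.allclose(k90, k0.T, atol=1e-8)  # 90-degree rotation == transpose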
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __magic_name__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
27
1
'''simple docstring''' def lowerCamelCase ( lowerCamelCase : list): A_ : List[Any] = False while is_sorted is False: # Until all the indices are traversed keep looping A_ : Optional[Any] = True for i in range(0 , len(lowerCamelCase) - 1 , 2): # iterating over all even indices if lowerCamelCase[i] > lowerCamelCase[i + 1]: A_ , A_ : Optional[int] = lowerCamelCase[i + 1], lowerCamelCase[i] # swapping if elements not in order A_ : Optional[Any] = False for i in range(1 , len(lowerCamelCase) - 1 , 2): # iterating over all odd indices if lowerCamelCase[i] > lowerCamelCase[i + 1]: A_ , A_ : Union[str, Any] = lowerCamelCase[i + 1], lowerCamelCase[i] # swapping if elements not in order A_ : Tuple = False return lowerCamelCase if __name__ == "__main__": print('Enter list to be sorted') __magic_name__ = [int(x) for x in input().split()] # inputting elements of the list in one line __magic_name__ = lowerCamelCase(__magic_name__) print('The sorted list is') print(__magic_name__)
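Odd-even transposition sort alternates bubble passes over even-indexed and odd-indexed pairs until a full sweep makes no swap; a quick self-check of the same algorithm against Python's built-in sort:

import random

def odd_even_sort(values):
    swapped = True
    while swapped:
        swapped = False
        for phase in (0, 1):  # even pass, then odd pass
            for i in range(phase, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    swapped = True
    return values

data = [random.randint(0, 99) for _ in range(20)]
assert odd_even_sort(data[:]) == sorted(data)
print("ok")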
27
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = KandinskyVaaControlnetPipeline a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] a_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a_ = False @property def _a ( self : Any ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return 32 @property def _a ( self : Tuple ): '''simple docstring''' return self.time_input_dim @property def _a ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def _a ( self : Optional[Any] ): '''simple docstring''' return 100 @property def _a ( self : List[Any] ): '''simple docstring''' torch.manual_seed(0 ) A_ : List[Any] = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } A_ : Tuple = UNetaDConditionModel(**_a ) return model @property def _a ( self : List[str] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _a ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) A_ : int = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : List[str] ): '''simple docstring''' A_ : Optional[Any] = self.dummy_unet A_ : int = self.dummy_movq A_ : Tuple = DDIMScheduler( num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,) A_ : int = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ): '''simple docstring''' A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) 
,rng=random.Random(_a ) ).to(_a ) A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( _a ) # create hint A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a ) if str(_a ).startswith("""mps""" ): A_ : Optional[Any] = torch.manual_seed(_a ) else: A_ : str = torch.Generator(device=_a ).manual_seed(_a ) A_ : List[Any] = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def _a ( self : Dict ): '''simple docstring''' A_ : List[Any] = """cpu""" A_ : List[str] = self.get_dummy_components() A_ : Tuple = self.pipeline_class(**_a ) A_ : Dict = pipe.to(_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) ) A_ : Tuple = output.images A_ : Optional[Any] = pipe( **self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0] A_ : Tuple = image[0, -3:, -3:, -1] A_ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : List[Any] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : Any ): '''simple docstring''' A_ : Tuple = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) A_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0 A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(_a ) A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) A_ : Union[str, Any] = pipeline.to(_a ) pipeline.set_progress_bar_config(disable=_a ) A_ : Optional[Any] = """A robot, 4k photo""" A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ , A_ : List[str] = pipe_prior( _a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple() A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 ) A_ : List[Any] = pipeline( image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,) A_ : Dict = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_a ,_a )
27
1
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowerCamelCase ( lowerCamelCase : str): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def lowerCamelCase ( lowerCamelCase : str): # word like '180' or '身高' or '神' for char in word: A_ : List[Any] = ord(lowerCamelCase) if not _is_chinese_char(lowerCamelCase): return 0 return 1 def lowerCamelCase ( lowerCamelCase : List[str]): A_ : int = set() for token in tokens: A_ : List[str] = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase) if chinese_word: word_set.add(lowerCamelCase) A_ : int = list(lowerCamelCase) return word_list def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()): if not chinese_word_set: return bert_tokens A_ : Optional[int] = max([len(lowerCamelCase) for w in chinese_word_set]) A_ : int = bert_tokens A_ , A_ : Optional[Any] = 0, len(lowerCamelCase) while start < end: A_ : Tuple = True if is_chinese(bert_word[start]): A_ : int = min(end - start , lowerCamelCase) for i in range(lowerCamelCase , 1 , -1): A_ : Any = """""".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1 , start + i): A_ : Optional[Any] = """##""" + bert_word[j] A_ : Dict = start + i A_ : int = False break if single_word: start += 1 return bert_word def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer): A_ : Optional[int] = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : str = ltp_tokenizer.seg(lines[i : i + 100])[0] A_ : Optional[int] = [get_chinese_word(lowerCamelCase) for r in res] ltp_res.extend(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Dict = [] for i in range(0 , len(lowerCamelCase) , 100): A_ : List[str] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512) bert_res.extend(res["""input_ids"""]) assert len(lowerCamelCase) == len(lowerCamelCase) A_ : Optional[Any] = [] for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase): A_ : int = [] for id in input_ids: A_ : Optional[int] = bert_tokenizer._convert_id_to_token(lowerCamelCase) input_tokens.append(lowerCamelCase) A_ : Tuple = add_sub_symbol(lowerCamelCase , lowerCamelCase) A_ : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCamelCase): if token[:2] == "##": A_ : Optional[Any] = token[2:] # save chinese tokens' pos if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)): ref_id.append(lowerCamelCase) ref_ids.append(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) return ref_ids def lowerCamelCase ( lowerCamelCase : List[str]): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""") as f: A_ : Optional[int] = f.readlines() A_ : Tuple = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ : int = LTP(args.ltp) # faster in GPU device A_ : Optional[Any] = BertTokenizer.from_pretrained(args.bert) A_ : Union[str, Any] = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase) with open(args.save_path , """w""" , encoding="""utf-8""") as f: A_ : List[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids] f.writelines(lowerCamelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path' ) parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer') parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res') __magic_name__ = parser.parse_args() main(args)
27
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
27
1
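A short usage sketch for the DeBERTa-v2 config above (this assumes the released transformers package is on the path; the override values are illustrative, not the defaults):
from transformers import DebertaV2Config

config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
print(config.model_type)   # "deberta-v2"
print(config.hidden_size)  # 768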
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class __lowerCAmelCase ( datasets.BeamBasedBuilder ): '''simple docstring''' def _a ( self : Optional[int] ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"""content""": datasets.Value("""string""" )} ) ,supervised_keys=_a ,) def _a ( self : List[str] ,_a : Dict ,_a : int ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""examples""": get_test_dummy_examples()} )] def _a ( self : Any ,_a : Union[str, Any] ,_a : Optional[Any] ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_a ) class __lowerCAmelCase ( datasets.BeamBasedBuilder ): '''simple docstring''' def _a ( self : List[str] ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) ,supervised_keys=_a ,) def _a ( self : Optional[Any] ,_a : Union[str, Any] ,_a : Dict ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"""examples""": get_test_nested_examples()} ) ] def _a ( self : Tuple ,_a : Tuple ,_a : Dict ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(_a ) def lowerCamelCase ( ): return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])] def lowerCamelCase ( ): return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])] class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @require_beam def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Optional[Any] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: A_ : Union[str, Any] = DummyBeamDataset(cache_dir=_a ,beam_runner="""DirectRunner""" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_a ,builder.name ,"""default""" ,"""0.0.0""" ,f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features ,datasets.Features({"""content""": datasets.Value("""string""" )} ) ) A_ : Optional[Any] = builder.as_dataset() self.assertEqual(dset["""train"""].num_rows ,_a ) self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples ,_a ) self.assertDictEqual(dset["""train"""][0] ,get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["""train"""][expected_num_examples - 1] ,get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_a ,builder.name ,"""default""" ,"""0.0.0""" ,"""dataset_info.json""" ) ) ) del dset @require_beam def _a ( self : Dict ): '''simple docstring''' import apache_beam as beam A_ : Dict = beam.io.parquetio.WriteToParquet A_ : Optional[int] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: A_ : List[Any] = DummyBeamDataset(cache_dir=_a ,beam_runner="""DirectRunner""" ) with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock: A_ : int = partial(_a ,num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( _a ,builder.name ,"""default""" ,"""0.0.0""" ,f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( _a ,builder.name ,"""default""" ,"""0.0.0""" 
,f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features ,datasets.Features({"""content""": datasets.Value("""string""" )} ) ) A_ : int = builder.as_dataset() self.assertEqual(dset["""train"""].num_rows ,_a ) self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples ,_a ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["""train"""]["""content"""] ) ,sorted(["""foo""", """bar""", """foobar"""] ) ) self.assertTrue( os.path.exists(os.path.join(_a ,builder.name ,"""default""" ,"""0.0.0""" ,"""dataset_info.json""" ) ) ) del dset @require_beam def _a ( self : Optional[int] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: A_ : List[Any] = DummyBeamDataset(cache_dir=_a ) self.assertRaises(datasets.builder.MissingBeamOptions ,builder.download_and_prepare ) @require_beam def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: A_ : str = NestedBeamDataset(cache_dir=_a ,beam_runner="""DirectRunner""" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(_a ,builder.name ,"""default""" ,"""0.0.0""" ,f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features ,datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) ) A_ : Tuple = builder.as_dataset() self.assertEqual(dset["""train"""].num_rows ,_a ) self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples ,_a ) self.assertDictEqual(dset["""train"""][0] ,get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["""train"""][expected_num_examples - 1] ,get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(_a ,builder.name ,"""default""" ,"""0.0.0""" ,"""dataset_info.json""" ) ) ) del dset
27
'''simple docstring'''
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
27
1
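The search script above relies on BeautifulSoup's CSS selectors; a self-contained sketch of the same selection logic against an inline HTML snippet (no network access; the class name is copied from the script):
from bs4 import BeautifulSoup

html = '<a class="eZt8xd" href="/maps">Maps</a><a class="eZt8xd" href="/search?q=x">Result</a>'
soup = BeautifulSoup(html, 'html.parser')
for link in soup.select('.eZt8xd')[:5]:
    print(link.text, link.get('href'))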
'''simple docstring'''
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + " ".join(f'{it:>8}' for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + " ".join(f'{it:>8}' for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
27
'''simple docstring'''
from ... import PretrainedConfig

NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
27
1
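The core of the Banker's algorithm above is a safety check: a state is safe if every process can run to completion in some order. A dependency-free sketch of that check with illustrative numbers (not the module's test tables):
def is_safe(available: list[int], need: list[list[int]], allocated: list[list[int]]) -> bool:
    work, finished = list(available), [False] * len(need)
    while True:
        progressed = False
        for i, row in enumerate(need):
            if not finished[i] and all(n <= w for n, w in zip(row, work)):
                # process i can finish and releases its allocated resources
                work = [w + a for w, a in zip(work, allocated[i])]
                finished[i] = True
                progressed = True
        if not progressed:
            return all(finished)


print(is_safe([3, 3, 2], [[1, 2, 2], [4, 0, 0], [7, 3, 3]], [[2, 0, 0], [3, 0, 2], [0, 1, 0]]))  # True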
'''simple docstring''' import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def _a ( self : int ,_a : Optional[int]=0 ): '''simple docstring''' A_ : Any = np.random.RandomState(_a ) A_ : List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_a ) A_ : Union[str, Any] = self.get_dummy_inputs() A_ : List[Any] = pipe(**_a ).images A_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : List[Any] = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Any ): '''simple docstring''' A_ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) A_ : Any = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=_a ) pipe.set_progress_bar_config(disable=_a ) A_ : Dict = self.get_dummy_inputs() A_ : Union[str, Any] = pipe(**_a ).images A_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : int = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Dict ): '''simple docstring''' A_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) A_ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) A_ : Optional[int] = self.get_dummy_inputs() A_ : Optional[int] = pipe(**_a ).images A_ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : int = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) A_ : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) A_ : Dict = self.get_dummy_inputs() A_ : Union[str, Any] = pipe(**_a ).images A_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Dict = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Optional[Any] ): '''simple docstring''' A_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint 
,provider="""CPUExecutionProvider""" ) A_ : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) A_ : Union[str, Any] = self.get_dummy_inputs() A_ : Any = pipe(**_a ).images A_ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Tuple = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : List[Any] ): '''simple docstring''' A_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) A_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = self.get_dummy_inputs() A_ : int = pipe(**_a ).images A_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A_ : Union[str, Any] = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Any = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_a ) A_ : str = self.get_dummy_inputs() A_ : List[str] = 3 * [inputs["""prompt"""]] # forward A_ : Optional[Any] = pipe(**_a ) A_ : Any = output.images[0, -3:, -3:, -1] A_ : str = self.get_dummy_inputs() A_ : Tuple = 3 * [inputs.pop("""prompt""" )] A_ : List[Any] = pipe.tokenizer( _a ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=_a ,return_tensors="""np""" ,) A_ : Union[str, Any] = text_inputs["""input_ids"""] A_ : Tuple = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] A_ : Optional[int] = prompt_embeds # forward A_ : int = pipe(**_a ) A_ : Union[str, Any] = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 def _a ( self : Dict ): '''simple docstring''' A_ : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_a ) A_ : Tuple = self.get_dummy_inputs() A_ : Union[str, Any] = 3 * ["""this is a negative prompt"""] A_ : Any = negative_prompt A_ : Optional[Any] = 3 * [inputs["""prompt"""]] # forward A_ : Tuple = pipe(**_a ) A_ : Any = output.images[0, -3:, -3:, -1] A_ : List[Any] = self.get_dummy_inputs() A_ : List[str] = 3 * [inputs.pop("""prompt""" )] A_ : List[Any] = [] for p in [prompt, negative_prompt]: A_ : int = pipe.tokenizer( _a ,padding="""max_length""" ,max_length=pipe.tokenizer.model_max_length ,truncation=_a ,return_tensors="""np""" ,) A_ : Tuple = text_inputs["""input_ids"""] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) A_ , A_ : Union[str, Any] = embeds # forward A_ : List[Any] = pipe(**_a ) A_ : str = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @property def _a ( self : Tuple ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _a ( self : Any ): '''simple docstring''' A_ : Union[str, Any] = ort.SessionOptions() A_ : int = False return options def _a ( self : 
Any ): '''simple docstring''' A_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" ,revision="""onnx""" ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_a ) A_ : Optional[int] = """A painting of a squirrel eating a burger""" np.random.seed(0 ) A_ : Dict = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=10 ,output_type="""np""" ) A_ : Tuple = output.images A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : Tuple = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Optional[int] ): '''simple docstring''' A_ : Dict = DDIMScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" ) A_ : int = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=_a ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_a ) A_ : Dict = """open neural network exchange""" A_ : Optional[int] = np.random.RandomState(0 ) A_ : Union[str, Any] = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_a ,output_type="""np""" ) A_ : Tuple = output.images A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : str = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Any ): '''simple docstring''' A_ : str = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,subfolder="""scheduler""" ,revision="""onnx""" ) A_ : List[str] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,scheduler=_a ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) sd_pipe.set_progress_bar_config(disable=_a ) A_ : Optional[int] = """open neural network exchange""" A_ : Optional[Any] = np.random.RandomState(0 ) A_ : int = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=_a ,output_type="""np""" ) A_ : str = output.images A_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A_ : str = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _a ( self : Any ): '''simple docstring''' A_ : int = 0 def test_callback_fn(_a : int ,_a : int ,_a : np.ndarray ) -> None: A_ : Any = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) A_ : Union[str, Any] = latents[0, -3:, -3:, -1] A_ : str = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) A_ : Any = latents[0, -3:, -3:, -1] A_ : int = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3 A_ : int = False A_ : int = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=_a ,feature_extractor=_a 
,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=_a ) A_ : List[str] = """Andromeda galaxy in a bottle""" A_ : List[Any] = np.random.RandomState(0 ) pipe( prompt=_a ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=_a ,callback=_a ,callback_steps=1 ,) assert test_callback_fn.has_been_called assert number_of_steps == 6 def _a ( self : Dict ): '''simple docstring''' A_ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" ,revision="""onnx""" ,safety_checker=_a ,feature_extractor=_a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) assert isinstance(_a ,_a ) assert pipe.safety_checker is None A_ : List[str] = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_a ) A_ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(_a ) # sanity check that the pipeline still works assert pipe.safety_checker is None A_ : int = pipe("""example prompt""" ,num_inference_steps=2 ).images[0] assert image is not None
27
'''simple docstring'''
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))
27
1
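A quick usage sketch, reusing depth_first_search and G from the traversal snippet above: starting at 'A' reaches every node of the connected graph, and the returned set is order-independent.
visited = depth_first_search(G, 'A')
assert visited == {'A', 'B', 'C', 'D', 'E', 'F', 'G'}
print(sorted(visited))  # ['A', 'B', 'C', 'D', 'E', 'F', 'G']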
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
27
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) def lowerCamelCase ( lowerCamelCase : Dict): A_ : List[str] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: A_ : Union[str, Any] = [144, 192, 240] A_ : int = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: A_ : List[str] = [96, 120, 144] A_ : Any = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: A_ : Any = [64, 80, 96] A_ : List[str] = [16, 16, 24, 48, 64, 80, 320] A_ : Any = 0.05 A_ : List[Any] = 2.0 if mobilevit_name.startswith("""deeplabv3_"""): A_ : int = 512 A_ : Optional[int] = 16 A_ : List[Any] = 21 A_ : List[str] = """pascal-voc-id2label.json""" else: A_ : str = 1000 A_ : Any = """imagenet-1k-id2label.json""" A_ : Any = """huggingface/label-files""" A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r""")) A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()} A_ : Any = idalabel A_ : List[str] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False): for i in range(1 , 6): if F'layer_{i}.' in name: A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.') if "conv_1." in name: A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""") if ".block." in name: A_ : Optional[Any] = name.replace(""".block.""" , """.""") if "exp_1x1" in name: A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""") if "red_1x1" in name: A_ : int = name.replace("""red_1x1""" , """reduce_1x1""") if ".local_rep.conv_3x3." in name: A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""") if ".local_rep.conv_1x1." in name: A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""") if ".norm." in name: A_ : Tuple = name.replace(""".norm.""" , """.normalization.""") if ".conv." in name: A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""") if ".conv_proj." in name: A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""") for i in range(0 , 2): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.') for i in range(2 , 6): for j in range(0 , 4): if F'.{i}.{j}.' in name: A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.') if "expand_1x1" in name: A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""") if "conv_3x3" in name: A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""") if "reduce_1x1" in name: A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""") for i in range(2 , 5): if F'.global_rep.{i}.weight' in name: A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""") if F'.global_rep.{i}.bias' in name: A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""") if ".global_rep." in name: A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""") if ".pre_norm_mha.0." 
in name: A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""") if ".pre_norm_mha.1.out_proj." in name: A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""") if ".pre_norm_ffn.0." in name: A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""") if ".pre_norm_ffn.1." in name: A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""") if ".pre_norm_ffn.4." in name: A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""") if ".transformer." in name: A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""") if ".aspp_layer." in name: A_ : int = name.replace(""".aspp_layer.""" , """.""") if ".aspp_pool." in name: A_ : Tuple = name.replace(""".aspp_pool.""" , """.""") if "seg_head." in name: A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""") if "segmentation_head.classifier.classifier." in name: A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""") if "classifier.fc." in name: A_ : str = name.replace("""classifier.fc.""" , """classifier.""") elif (not base_model) and ("segmentation_head." not in name): A_ : str = """mobilevit.""" + name return name def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False): if base_model: A_ : Dict = """""" else: A_ : Any = """mobilevit.""" for key in orig_state_dict.copy().keys(): A_ : List[Any] = orig_state_dict.pop(lowerCamelCase) if key[:8] == "encoder.": A_ : int = key[8:] if "qkv" in key: A_ : Any = key.split(""".""") A_ : str = int(key_split[0][6:]) - 1 A_ : int = int(key_split[3]) A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}') A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size A_ : Optional[Any] = ( F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.' 
) if "weight" in key: A_ : Dict = val[:dim, :] A_ : Optional[int] = val[dim : dim * 2, :] A_ : List[Any] = val[-dim:, :] else: A_ : Optional[Any] = val[:dim] A_ : List[Any] = val[dim : dim * 2] A_ : Any = val[-dim:] else: A_ : List[str] = val return orig_state_dict def lowerCamelCase ( ): A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw) return im @torch.no_grad() def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False): A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase) # load original state_dict A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""") # load 🤗 model if mobilevit_name.startswith("""deeplabv3_"""): A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval() else: A_ : str = MobileViTForImageClassification(lowerCamelCase).eval() A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase) model.load_state_dict(lowerCamelCase) # Check outputs on an image, prepared by MobileViTImageProcessor A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32) A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""") A_ : List[Any] = model(**lowerCamelCase) A_ : Dict = outputs.logits if mobilevit_name.startswith("""deeplabv3_"""): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": A_ : int = torch.tensor( [ [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]], [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]], [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xs": A_ : Tuple = torch.tensor( [ [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]], [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]], [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]], ]) elif mobilevit_name == "deeplabv3_mobilevit_xxs": A_ : Tuple = torch.tensor( [ [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]], [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]], [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]], ]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241]) elif mobilevit_name == "mobilevit_xs": A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587]) elif mobilevit_name == "mobilevit_xxs": A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653]) else: raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}') assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4) Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}') model.save_pretrained(lowerCamelCase) print(F'Saving image processor to {pytorch_dump_folder_path}') image_processor.save_pretrained(lowerCamelCase) if push_to_hub: A_ : str = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", 
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print("""Pushing to the hub...""") A_ : Union[str, Any] = model_mapping[mobilevit_name] image_processor.push_to_hub(lowerCamelCase , organization="""apple""") model.push_to_hub(lowerCamelCase , organization="""apple""") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--mobilevit_name', default='mobilevit_s', type=str, help=( 'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',' ' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.' ), ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __magic_name__ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
27
1
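The image processor above floors image dimensions to the nearest multiple of size_divisor before resizing. The rounding itself is one integer-division trick, shown standalone:
def round_down(value: int, divisor: int = 32) -> int:
    # floor `value` to the nearest multiple of `divisor`
    return value // divisor * divisor


print(round_down(480))  # 480 (already a multiple of 32)
print(round_down(500))  # 480
print(round_down(31))   # 0 -- inputs smaller than the divisor collapse to zero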