'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
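
# A minimal end-to-end usage sketch for the pipeline under test (not part of the
# original test file; the fp16/cuda setup and the `export_to_gif` helper are
# assumptions based on the public diffusers API of this era):
#
#     import torch
#     from diffusers import ShapEPipeline
#     from diffusers.utils import export_to_gif
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
#     frames = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256).images[0]
#     export_to_gif(frames, "shark_3d.gif")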
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
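
# Usage sketch (not part of the test file): the criteria exercised above plug
# into generation through the `stopping_criteria` argument of `model.generate`;
# `model` and `input_ids` are placeholders:
#
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
#     output_ids = model.generate(input_ids, stopping_criteria=criteria)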
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
    from ... import PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default"):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
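
# A minimal composition sketch (illustrative; the ViT + GPT-2 pairing is an
# assumption, any vision encoder and autoregressive decoder config will do):
#
#     from transformers import GPT2Config, ViTConfig
#
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), GPT2Config())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention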
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ):
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
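
# Usage sketch for `truncate_before_pattern` (illustrative; the regex list is an
# assumption modeled on typical code-completion post-processing):
#
#     tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#     text = tokenizer.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])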
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions, label_ids) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
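
# Example invocation (illustrative; paths and the `tasks.py` module that defines
# the NER task class are assumptions about the surrounding example directory):
#
#     python run_ner.py --data_dir ./data --labels ./data/labels.txt \
#         --model_name_or_path bert-base-cased --output_dir ./out \
#         --task_type NER --max_seq_length 128 --do_train --do_eval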
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, so that
    every byte has a stand-in the BPE code can work with.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
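
# For intuition: printable bytes map to themselves, while excluded bytes are
# shifted into the 256+ codepoint range. For example, the space byte (0x20) maps
# to "Ġ" (U+0120), the marker seen on word-initial tokens in GPT-2/BART vocabularies:
#
#     bytes_to_unicode()[ord(" ")]  # -> "Ġ"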

def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
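
# A minimal round-trip sketch (assumes network access to the Hub; the checkpoint
# id is taken from the map above):
#
#     tok = BartTokenizer.from_pretrained("facebook/bart-base")
#     ids = tok("Hello world")["input_ids"]  # wrapped in <s> ... </s> by build_inputs_with_special_tokens
#     assert tok.decode(ids, skip_special_tokens=True) == "Hello world"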
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
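
# Instantiation sketch: defaults mirror the signature above; note that
# `attribute_map` routes the generic names to decoder-specific ones:
#
#     config = Speech2Text2Config(vocab_size=10000, d_model=256)
#     config.hidden_size           # -> 256, resolved to d_model
#     config.num_attention_heads   # -> 4, resolved to decoder_attention_heads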
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
'''simple docstring'''
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6).

    >>> solution(10)
    2640
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Net present value of yearly cash flows; the first flow occurs today
    and is therefore not discounted.

    >>> present_value(0.1, [-1000.0, 300.0, 400.0, 500.0])
    -21.04
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel


api = HfApi()

results = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"""Started running {mod.modelId}!!!""")
        if mod.modelId.startswith("CompVis"):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : Optional[int] = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "efficientnet"
    def __init__( self , num_channels = 3 , image_size = 6_00 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 1_12, 1_92] , out_channels = [16, 24, 40, 80, 1_12, 1_92, 3_20] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.2_5 , hidden_act = "swish" , hidden_dim = 25_60 , pooling_type = "mean" , initializer_range = 0.0_2 , batch_norm_eps = 0.0_0_1 , batch_norm_momentum = 0.9_9 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class __A (__magic_name__ ):
snake_case :List[str] = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self ):
return 1E-5
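# Hedged worked example (not in the original): with the default num_block_repeats
# of [1, 2, 2, 3, 3, 4, 1], the derived num_hidden_layers above is
# sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 16 * 4 == 64.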
| 712 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 1_92, 3_84] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.0_2 , layer_norm_eps=1E-12 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
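# Hedged worked example (not in the original): the per-stage lists line up by index,
# so stage 0 embeds 7x7 patches at stride 4 into 64 channels with 1 attention head
# and depth 1, while stage 2 embeds 3x3 patches at stride 2 into 384 channels with
# 6 heads and depth 10.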
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.9_88e9  # units = N * m^2 * C^-2
def _lowercase ( force , charge1 , charge2 , distance ) -> dict[str, float]:
    """simple docstring"""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0" )
    if distance < 0:
        raise ValueError("Distance cannot be negative" )
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
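    # Hedged usage sketch (not in the original): with force passed as 0, two 1 C
    # charges 1 m apart give force = COULOMBS_CONSTANT, i.e. {'force': 8988000000.0}.
    print(_lowercase(0, 1.0, 1.0, 1.0))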
| 713 | '''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def _lowercase ( coefficient_matrix , constant_matrix , init_val , iterations , ) -> list[float]:
    """simple docstring"""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(init_val)} and {rows1}"""
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant ( table ) -> bool:
    """simple docstring"""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows):
        total = 0
        for j in range(0 , cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
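    # Hedged usage sketch (not in the original): a strictly diagonally dominant
    # 2x2 system (4x + y = 1, x + 3y = 2); ten sweeps land near [0.0909, 0.6364].
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(_lowercase(coefficient, constant, [0.0, 0.0], 10))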
| 10 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a : Any = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 714 | '''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( number ) -> int:
    """simple docstring"""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
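    # Hedged usage sketch (not in the original, assuming the `maths` helpers
    # resolve): mobius(10) == 1 (two distinct primes), mobius(12) == 0 (4 | 12).
    print(_lowercase(10), _lowercase(12))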
| 10 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy( preds , labels ):
    """simple docstring"""
    return (preds == labels).mean()
@dataclass
class __A :
snake_case :str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case :Optional[str] = field(
default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __A :
snake_case :str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
snake_case :str = field(metadata={"help": "Should contain the data files for the task."} )
snake_case :int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case :bool = field(
default=__magic_name__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main ( ):
"""simple docstring"""
__UpperCAmelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , lowerCamelCase__ )
# Set seed
set_seed(training_args.seed )
try:
__UpperCAmelCase : Tuple = processors[data_args.task_name]()
__UpperCAmelCase : str = processor.get_labels()
__UpperCAmelCase : str = len(lowerCamelCase__ )
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
__UpperCAmelCase : int = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__UpperCAmelCase : List[str] = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowerCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
__UpperCAmelCase : Optional[Any] = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__UpperCAmelCase : Dict = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , compute_metrics=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase : Optional[int] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__UpperCAmelCase : int = trainer.evaluate()
__UpperCAmelCase : List[str] = os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_master():
with open(lowerCamelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , lowerCamelCase__ , lowerCamelCase__ )
writer.write("%s = %s\n" % (key, value) )
results.update(lowerCamelCase__ )
return results
def _mp_fn( index ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
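# Hedged CLI sketch (not in the original file; paths and the "swag" task name are
# placeholders assumed from the dataclasses above):
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-cased \
#       --data_dir $SWAG_DIR --output_dir ./mc_out --max_seq_length 80 --do_train --do_eval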
| 715 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_a : Dict = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig (folder_based_builder.FolderBasedBuilderConfig ):
snake_case :bool = None
snake_case :bool = None
class __A (folder_based_builder.FolderBasedBuilder ):
snake_case :Dict = datasets.Audio()
snake_case :List[str] = "audio"
snake_case :List[str] = AudioFolderConfig
snake_case :List[str] # definition at the bottom of the script
snake_case :Optional[Any] = AudioClassification(audio_column="audio" , label_column="label" )
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
__A.EXTENSIONS = AUDIO_EXTENSIONS
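# Hedged usage sketch (not in the original): a directory of labelled audio files in
# one of the extensions above can typically be loaded with the packaged builder, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="/path/to/folder")  # path is a placeholder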
| 716 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class __A (__magic_name__ ):
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
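# Hedged worked example (not in the original): for the default task the mapping above
# exports input_ids and attention_mask with dynamic axes {0: "batch", 1: "sequence"},
# so batch size and sequence length stay free at ONNX inference time.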
| 10 | 0 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __A (__magic_name__ ):
snake_case :int = (DDIMParallelScheduler,)
snake_case :Tuple = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample , eta ).prev_sample
        return sample
def _snake_case ( self ):
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def _snake_case ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_ )
__UpperCAmelCase : List[str] = self.scheduler_classes[0]
__UpperCAmelCase : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
__UpperCAmelCase : List[Any] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _snake_case ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def _snake_case ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def _snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def _snake_case ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def _snake_case ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCamelCase_ )
def _snake_case ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase_ )
def _snake_case ( self ):
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def _snake_case ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCamelCase_ )
def _snake_case ( self ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ )
def _snake_case ( self ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=UpperCamelCase_ , eta=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.scheduler_classes[0]
__UpperCAmelCase : Dict = self.get_scheduler_config()
__UpperCAmelCase : Optional[Any] = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_4_7_7_1 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_2_4_6_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.0_2 ) ) < 1E-5
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__UpperCAmelCase : Optional[int] = self.get_scheduler_config()
__UpperCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : int = 10, 0.0
scheduler.set_timesteps(UpperCamelCase_ )
__UpperCAmelCase : Any = self.dummy_model()
__UpperCAmelCase : str = self.dummy_sample_deter
__UpperCAmelCase : Optional[Any] = self.dummy_sample_deter + 0.1
__UpperCAmelCase : List[str] = self.dummy_sample_deter - 0.1
__UpperCAmelCase : str = samplea.shape[0]
__UpperCAmelCase : Tuple = torch.stack([samplea, samplea, samplea] , dim=0 )
__UpperCAmelCase : List[Any] = torch.arange(UpperCamelCase_ )[0:3, None].repeat(1 , UpperCamelCase_ )
__UpperCAmelCase : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__UpperCAmelCase : Tuple = scheduler.batch_step_no_noise(UpperCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase_ )
__UpperCAmelCase : int = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1E-3
def _snake_case ( self ):
__UpperCAmelCase : str = self.full_loop()
__UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1E-3
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" )
__UpperCAmelCase : Any = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1E-3
def _snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
__UpperCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1E-3
def _snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
__UpperCAmelCase : Optional[int] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_4_1 ) < 1E-3
| 717 | '''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main ( ):
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
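# Hedged CLI sketch (not in the original): a typical invocation benchmarks named
# checkpoints, e.g.
#   python run_benchmark_tf.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128
# and a deprecated flag such as --no_env_print is reported via the message
# "Arg --no_env_print is no longer used, please use --no-env-print instead."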
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def _lowercase ( postfix_notation ) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
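    # Hedged usage sketch (not in the original): "2 1 + 3 *" means (2 + 1) * 3 == 9.
    print(_lowercase("2 1 + 3 *".split()))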
| 718 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ) -> bool:
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__UpperCAmelCase : Tuple = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
__UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
__UpperCAmelCase : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase_ , scheduler_enum.name )
__UpperCAmelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
__UpperCAmelCase : Optional[int] = pipe(**UpperCamelCase_ )[0]
outputs.append(UpperCamelCase_ )
assert check_same_shape(UpperCamelCase_ )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images
__UpperCAmelCase : int = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = torch.manual_seed(33 )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__UpperCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
__UpperCAmelCase : Dict = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
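# Hedged usage sketch (not part of the original tests): the intended two-stage flow is
#   low_res_latents = pipe(prompt, output_type="latent").images
#   image = upscaler(prompt=prompt, image=low_res_latents, num_inference_steps=20,
#                    guidance_scale=0).images[0]
# i.e. the upscaler consumes Stable Diffusion latents (or a 512px image) and returns
# a result with twice the resolution per side.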
| 10 | 0 |
'''simple docstring'''
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 719 | '''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    def __init__( self , features=None , **torch_tensor_kwargs ):
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate( self , column ):
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize( self , value ):
        import torch
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ):
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ):
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ):
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
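# Hedged usage sketch (not in the original; the class keeps the obfuscated name __A
# above): this formatter is what backs Dataset.with_format("torch"), and standalone
#   fmt = __A()
#   fmt.recursive_tensorize(np.ones((2, 3)))            # float64 array -> float32 tensor
#   fmt.recursive_tensorize([np.ones(3), np.ones(3)])   # equal-shape items are stacked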
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
_a : List[Any] = "2020.9.26"
_a : int = "xcodz-dot, cclaus, dhruvmanila"
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float]:
"""simple docstring"""
if not all(isinstance(lowerCamelCase__ , (float, int) ) for val in locals().values() ):
__UpperCAmelCase : Dict = f"""Input values must either be float or int: {list(locals().values() )}"""
raise TypeError(lowerCamelCase__ )
__UpperCAmelCase : List[str] = ((x * distance) / (z + distance)) * scale
__UpperCAmelCase : Dict = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> tuple[float, float, float]:
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError("Axis must be a str" )
__UpperCAmelCase : int = locals()
del input_variables["axis"]
if not all(isinstance(lowerCamelCase__ , (float, int) ) for val in input_variables.values() ):
__UpperCAmelCase : int = (
"Input values except axis must either be float or int: "
f"""{list(input_variables.values() )}"""
)
raise TypeError(lowerCamelCase__ )
__UpperCAmelCase : Union[str, Any] = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
__UpperCAmelCase : Any = x * math.cos(lowerCamelCase__ ) - y * math.sin(lowerCamelCase__ )
__UpperCAmelCase : List[Any] = y * math.cos(lowerCamelCase__ ) + x * math.sin(lowerCamelCase__ )
__UpperCAmelCase : List[Any] = z
elif axis == "x":
__UpperCAmelCase : Union[str, Any] = y * math.cos(lowerCamelCase__ ) - z * math.sin(lowerCamelCase__ )
__UpperCAmelCase : Optional[int] = z * math.cos(lowerCamelCase__ ) + y * math.sin(lowerCamelCase__ )
__UpperCAmelCase : Dict = x
elif axis == "y":
__UpperCAmelCase : Optional[int] = x * math.cos(lowerCamelCase__ ) - z * math.sin(lowerCamelCase__ )
__UpperCAmelCase : Union[str, Any] = z * math.cos(lowerCamelCase__ ) + x * math.sin(lowerCamelCase__ )
__UpperCAmelCase : Tuple = y
else:
raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'" )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(f"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 720 | '''simple docstring'''
def valid_coloring ( neighbours , colored_vertices , color ) -> bool:
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color ( graph , max_colors , colored_vertices , index ) -> bool:
    """simple docstring"""
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def _lowercase ( graph , max_colors ) -> list[int]:
    """simple docstring"""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
return colored_vertices
return []
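if __name__ == "__main__":
    # Hedged usage sketch (not in the original): 3-coloring a 4-cycle given as an
    # adjacency matrix; backtracking finds the alternating assignment [0, 1, 0, 1].
    cycle = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(_lowercase(cycle, 3))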
| 10 | 0 |
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.60_21e-19  # units = C
def _lowercase ( conductivity , electron_conc , mobility , ) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
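    # Hedged usage sketch (not in the original): solving for mobility from a
    # conductivity of 5.0 S/m and an electron concentration of 1e20 m^-3,
    # giving roughly ("mobility", 0.312).
    print(_lowercase(conductivity=5.0, electron_conc=1e20, mobility=0))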
| 721 | '''simple docstring'''
def set_bit ( number , position ) -> int:
    """simple docstring"""
    return number | (1 << position)
def clear_bit ( number , position ) -> int:
    """simple docstring"""
    return number & ~(1 << position)
def flip_bit ( number , position ) -> int:
    """simple docstring"""
    return number ^ (1 << position)
def is_bit_set ( number , position ) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1
def get_bit ( number , position ) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
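    # Hedged usage sketch (not in the original): starting from 0b1010,
    # set_bit(..., 0) -> 0b1011, clear_bit(..., 1) -> 0b1000, get_bit(..., 1) -> 1.
    print(bin(set_bit(0b1010, 0)), bin(clear_bit(0b1010, 1)), get_bit(0b1010, 1))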
| 10 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A (__magic_name__ ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
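        # The tuple order above must match the unpacking in
        # prepare_config_and_inputs_for_common below.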
    def get_config( self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
        (total_loss ,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        (total_loss ,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        model = FlaubertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , labels=sequence_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = FlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_simple_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs )
    def test_flaubert_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_token_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs )
    def test_flaubert_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
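                # What this exercises (hedged summary): tracing with example
                # inputs, torch.jit.save, torch.jit.load, and a forward pass
                # must all succeed when moving the traced model to the GPU.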
@require_torch
class __A (unittest.TestCase ):
@slow
def _snake_case ( self ):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 700 | '''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(point_a, point_b) -> float:
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(point_a) - np.array(point_b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest training samples."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
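    # Hedged extra check (illustrative, not part of the original script): with
    # two hand-made training points and k=1, the single nearest neighbour
    # decides the class.
    print(classifier([[0.0, 0.0], [1.0, 1.0]], [0, 1], ["near_origin", "near_one"], [0.1, 0.2], k=1))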
| 10 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor) -> List[int]:
    """Return the shape of `tensor`, preferring static dims and deferring unknown ones to tf.shape."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
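# Example (hedged, assumes eager execution): shape_list(tf.zeros((2, 3)))
# returns [2, 3]; for a symbolic tensor with an unknown batch dim, that entry
# is filled from the runtime tf.shape value instead.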
def stable_softmax(logits, axis=None, name=None) -> tf.Tensor:
    """Wrapper around tf.nn.softmax that adds a tiny offset to keep XLA happy."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1) -> tf.Tensor:
    """Apply layer normalization over `axis` via tf.nn.batch_normalization."""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon)
    return outputs
def flatten(input, start_dim=0, end_dim=-1) -> tf.Tensor:
    """Replicate the behavior of torch.flatten in TF."""
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
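# Example (hedged): flattening a (2, 3, 4) tensor with start_dim=1 yields
# shape (2, 12), matching torch.flatten's semantics.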
def invert_attention_mask(encoder_attention_mask) -> tf.Tensor:
    """Invert an attention mask (e.g. switch 0. and 1.) and add the broadcast axes."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids") -> None:
    """Raise if any id in `tensor` is not strictly smaller than the embedding size."""
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data) -> None:
    """Save `data` as attribute `name` on `group`, chunking if it exceeds the HDF5 header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
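# Illustrative sketch (hypothetical attribute name): a weight-name list whose
# serialized size exceeds 64512 bytes is stored as "weight_names0",
# "weight_names1", ... and reassembled by load_attributes_from_hdf5_group below.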
def load_attributes_from_hdf5_group(group, name) -> list:
    """Load attribute `name` from `group`, reassembling chunked attributes."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand rank-1 tf.Tensors nested in `data` to rank 2 (mirroring Keras)."""
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 701 | '''simple docstring'''
class __A :
    def __init__( self , set_counts ):
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self , src , dst ):
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self , disj_set ):
        if self.parents[disj_set] == disj_set:
            return disj_set
        # path compression: re-point this set at its root before returning
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
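if __name__ == "__main__":
    # Minimal smoke test (illustrative values, not from the source): three
    # singleton sets; merging 0 and 1 should grow the largest set to size 2.
    ds = __A([1, 1, 1])
    ds.merge(0, 1)
    assert ds.max_set == 2
    assert ds.get_parent(0) == ds.get_parent(1)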
| 10 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , unk_token="<unk>" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def _snake_case ( self ):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def _snake_case ( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<pad>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "[PAD]" )
        self.assertEqual(len(vocab_keys ) , 3_00_01 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
    def test_do_lower_case( self ):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB , do_lower_case=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
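        # Hedged reading: with do_lower_case=True both tokenizers must agree on
        # the lowercased pieces above; the tests below repeat the comparison for
        # split_by_punct and its combinations with do_lower_case.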
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _snake_case ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
__UpperCAmelCase : List[Any] = "I was born in 92000, and this is falsé."
__UpperCAmelCase : Optional[int] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCAmelCase : Optional[Any] = DebertaVaTokenizer(UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Dict = DebertaVaTokenizerFast(UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
# fmt: off
__UpperCAmelCase : List[str] = "I was born in 92000, and this is falsé."
__UpperCAmelCase : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCAmelCase : str = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
# fmt: off
__UpperCAmelCase : Optional[int] = "I was born in 92000, and this is falsé."
__UpperCAmelCase : str = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__UpperCAmelCase : List[str] = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : List[str] = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
# fmt: off
__UpperCAmelCase : Union[str, Any] = "I was born in 92000, and this is falsé."
__UpperCAmelCase : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__UpperCAmelCase : List[Any] = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : int = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
# fmt: off
__UpperCAmelCase : Optional[int] = " \tHeLLo!how \n Are yoU? "
__UpperCAmelCase : int = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__UpperCAmelCase : int = DebertaVaTokenizer(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Tuple = DebertaVaTokenizerFast(UpperCamelCase_ , do_lower_case=UpperCamelCase_ , split_by_punct=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCAmelCase : Union[str, Any] = "I was born in 92000, and this is falsé."
__UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.get_rust_tokenizer()
__UpperCAmelCase : int = tokenizer.encode(UpperCamelCase_ )
__UpperCAmelCase : Any = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : str = "This is a test"
__UpperCAmelCase : List[Any] = [13, 1, 43_98, 25, 21, 12_89]
__UpperCAmelCase : Optional[int] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__UpperCAmelCase : Dict = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__UpperCAmelCase : int = DebertaVaTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
__UpperCAmelCase : List[Any] = DebertaVaTokenizerFast(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
__UpperCAmelCase : Tuple = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Tuple = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : str = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : str = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# fmt: off
__UpperCAmelCase : Dict = "I was born in 92000, and this is falsé."
__UpperCAmelCase : Dict = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
__UpperCAmelCase : Dict = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__UpperCAmelCase : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__UpperCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Dict = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Dict = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : List[str] = DebertaVaTokenizer(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = tokenizer.encode("sequence builders" )
__UpperCAmelCase : int = tokenizer.encode("multi-sequence build" )
__UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase_ , )
@slow
def _snake_case ( self ):
# fmt: off
__UpperCAmelCase : int = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 702 | '''simple docstring'''
def method_a(boundary, steps) -> float:
    """Approximate the integral of f over [boundary[0], boundary[1]] with the trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ..., up to b - h."""
    x = a + h
    # Use <= so the last interior point (b - h) is not silently dropped.
    while x <= (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
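    # Sanity note (illustrative): for f(x) = x**2 on [0, 1] with 10 steps the
    # trapezoidal estimate printed above is ~0.335, near the exact value 1/3.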
| 10 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
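# Illustrative effect (hedged, assuming this file is the ViT package __init__):
# `from transformers.models.vit import ViTModel` resolves through _LazyModule,
# so the heavy torch/TF/flax imports are deferred until first attribute access.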
| 703 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 704 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
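        # Worked example (from the alignment table above): spm id 3 (",") maps
        # to fairseq id 3 + fairseq_offset = 4, while ids 0-3 stay pinned to
        # "<s>", "<pad>", "</s>", "<unk>".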
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
    def vocab_size( self ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
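# Hedged usage sketch: calling save_vocabulary with a directory path copies
# (or re-serializes) the SentencePiece model into
# "<dir>/sentencepiece.bpe.model" and returns that path in a 1-tuple.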
| 10 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase ):
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 2_50
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
| 705 | '''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase ):
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 2_50
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria( self ):
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
| 10 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ):
pass
def _snake_case ( self , UpperCamelCase_=0 , **UpperCamelCase_ ):
__UpperCAmelCase : str = dict(self.forward_default_kwargs )
__UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase_ )
__UpperCAmelCase : int = self.dummy_sample
__UpperCAmelCase : int = 0.1 * sample
__UpperCAmelCase : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : Optional[int] = self.get_scheduler_config()
__UpperCAmelCase : Optional[int] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase_ )
__UpperCAmelCase : List[Any] = scheduler_class.from_pretrained(UpperCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCAmelCase : Optional[int] = dummy_past_residuals[:]
__UpperCAmelCase : str = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : str = new_scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase : Any = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : List[str] = new_scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self , **UpperCamelCase_ ):
__UpperCAmelCase : Dict = self.scheduler_classes[0]
__UpperCAmelCase : Optional[int] = self.get_scheduler_config(**UpperCamelCase_ )
__UpperCAmelCase : Dict = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = 10
__UpperCAmelCase : Optional[Any] = self.dummy_model()
__UpperCAmelCase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_ )
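# PNDM first runs the pseudo Runge-Kutta (PRK) warmup steps, then switches to the pseudo linear multistep (PLMS) phase.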
for i, t in enumerate(scheduler.prk_timesteps ):
__UpperCAmelCase : str = model(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__UpperCAmelCase : Any = model(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : List[Any] = scheduler.step_plms(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
return sample
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
__UpperCAmelCase : Optional[int] = kwargs.pop("num_inference_steps" , UpperCamelCase_ )
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : str = self.get_scheduler_config()
__UpperCAmelCase : Tuple = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase : Any = self.dummy_sample
__UpperCAmelCase : List[str] = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase_ , "set_timesteps" ):
scheduler.set_timesteps(UpperCamelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase_ , "set_timesteps" ):
__UpperCAmelCase : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCAmelCase : int = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
__UpperCAmelCase : Optional[Any] = dummy_past_residuals[:]
__UpperCAmelCase : Dict = scheduler.step_prk(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : int = scheduler.step_prk(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__UpperCAmelCase : Optional[int] = scheduler.step_plms(UpperCamelCase_ , 0 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
__UpperCAmelCase : Union[str, Any] = scheduler.step_plms(UpperCamelCase_ , 1 , UpperCamelCase_ , **UpperCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _snake_case ( self ):
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def _snake_case ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_ )
__UpperCAmelCase : str = self.scheduler_classes[0]
__UpperCAmelCase : str = self.get_scheduler_config(steps_offset=1 )
__UpperCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
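# The duplicated leading entries (851, 851, ..., 601, 601) are the PRK warmup steps; the tail (501 ... 1) is the PLMS phase.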
def _snake_case ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def _snake_case ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def _snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def _snake_case ( self ):
for t in [1, 5, 10]:
self.check_over_forward(time_step=UpperCamelCase_ )
def _snake_case ( self ):
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=UpperCamelCase_ )
def _snake_case ( self ):
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
__UpperCAmelCase : Dict = 27
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : Optional[Any] = self.dummy_sample
__UpperCAmelCase : int = 0.1 * sample
__UpperCAmelCase : Dict = self.get_scheduler_config()
__UpperCAmelCase : str = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(UpperCamelCase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__UpperCAmelCase : int = scheduler.step_prk(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
def _snake_case ( self ):
with self.assertRaises(UpperCamelCase_ ):
__UpperCAmelCase : Tuple = self.scheduler_classes[0]
__UpperCAmelCase : List[str] = self.get_scheduler_config()
__UpperCAmelCase : Optional[Any] = scheduler_class(**UpperCamelCase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _snake_case ( self ):
__UpperCAmelCase : Any = self.full_loop()
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1E-3
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" )
__UpperCAmelCase : Any = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1E-3
def _snake_case ( self ):
# We specify a different beta so that the first alpha is 0.99; in the original diffusers test this run has set_alpha_to_one=True.
__UpperCAmelCase : str = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[str] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1E-3
def _snake_case ( self ):
# Same beta override, but in the original diffusers test this run has set_alpha_to_one=False, hence the different expected sums.
__UpperCAmelCase : Dict = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : Optional[int] = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1E-3
| 706 | '''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_a : Tuple = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_a : Dict = {
"Salesforce/codegen-350M-mono": 2048,
}
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :str = PRETRAINED_VOCAB_FILES_MAP
snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Tuple = ["input_ids", "attention_mask"]
snake_case :Dict = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
__UpperCAmelCase : int = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : str = super().decode(
token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , )
if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0:
__UpperCAmelCase : Union[str, Any] = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
return decoded_text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : Dict = pattern.search(UpperCamelCase_ , UpperCamelCase_ )
return m.start() if m else -1
__UpperCAmelCase : List[str] = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern]
__UpperCAmelCase : Optional[Any] = list(re.finditer("^print" , UpperCamelCase_ , re.MULTILINE ) )
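# Heuristic truncation: keep only the first top-level print / def block, then cut at the earliest match of any caller-supplied pattern.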
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : List[Any] = completion[: prints[1].start()]
__UpperCAmelCase : Tuple = list(re.finditer("^def" , UpperCamelCase_ , re.MULTILINE ) )
if len(UpperCamelCase_ ) > 1:
__UpperCAmelCase : Union[str, Any] = completion[: defs[1].start()]
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Dict = [
pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1
]
if len(UpperCamelCase_ ) > 0:
return completion[: min(UpperCamelCase_ )]
else:
return completion
| 10 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_a : str = logging.get_logger(__name__)
class __A (__magic_name__ ):
snake_case :Optional[Any] = ["pixel_values"]
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : List[str] = size if size is not None else {"shortest_edge": 2_56}
__UpperCAmelCase : int = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__UpperCAmelCase : Any = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
__UpperCAmelCase : str = get_size_dict(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = do_resize
__UpperCAmelCase : Dict = size
__UpperCAmelCase : Dict = resample
__UpperCAmelCase : str = do_center_crop
__UpperCAmelCase : Union[str, Any] = crop_size
__UpperCAmelCase : List[str] = do_rescale
__UpperCAmelCase : List[Any] = rescale_factor
__UpperCAmelCase : List[Any] = do_normalize
__UpperCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__UpperCAmelCase : Optional[int] = get_resize_output_image_size(UpperCamelCase_ , size=size["shortest_edge"] , default_to_square=UpperCamelCase_ )
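# The computed output size preserves the aspect ratio: the image's shorter side is scaled to size["shortest_edge"].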
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase_ )
return center_crop(UpperCamelCase_ , size=(size["height"], size["width"]) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ):
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ):
__UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Union[str, Any] = size if size is not None else self.size
__UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__UpperCAmelCase : Tuple = resample if resample is not None else self.resample
__UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase : List[Any] = get_size_dict(UpperCamelCase_ )
__UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : List[Any] = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
__UpperCAmelCase : Optional[Any] = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__UpperCAmelCase : List[Any] = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__UpperCAmelCase : Tuple = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_center_crop:
__UpperCAmelCase : Optional[int] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
if do_rescale:
__UpperCAmelCase : Optional[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__UpperCAmelCase : List[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__UpperCAmelCase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 707 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
_a : Tuple = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
_a : List[Any] = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def _lowercase ( ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
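# These three ranges are the printable bytes that map to themselves; every other byte value is remapped below
# to an unused code point (256 + n), yielding a fully reversible byte <-> unicode table for byte-level BPE.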
__UpperCAmelCase : Optional[Any] = bs[:]
__UpperCAmelCase : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCamelCase__ )
cs.append(2**8 + n )
n += 1
__UpperCAmelCase : Dict = [chr(lowerCamelCase__ ) for n in cs]
return dict(zip(lowerCamelCase__ , lowerCamelCase__ ) )
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Dict = set()
__UpperCAmelCase : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase : Optional[Any] = char
return pairs
class __A (__magic_name__ ):
snake_case :Optional[int] = VOCAB_FILES_NAMES
snake_case :List[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : str = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token
__UpperCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token
__UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token
__UpperCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
super().__init__(
errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
with open(UpperCamelCase_ , encoding="utf-8" ) as vocab_handle:
__UpperCAmelCase : int = json.load(UpperCamelCase_ )
__UpperCAmelCase : Any = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase : Any = errors # how to handle errors in decoding
__UpperCAmelCase : str = bytes_to_unicode()
__UpperCAmelCase : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase_ , encoding="utf-8" ) as merges_handle:
__UpperCAmelCase : str = merges_handle.read().split("\n" )[1:-1]
__UpperCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase : Dict = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.cache:
return self.cache[token]
__UpperCAmelCase : List[str] = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__UpperCAmelCase : str = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float("inf" ) ) )
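# Greedily merge the pair with the lowest merge rank (i.e. the earliest-learned, most frequent merge) first.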
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase : List[Any] = bigram
__UpperCAmelCase : Any = []
__UpperCAmelCase : List[str] = 0
while i < len(UpperCamelCase_ ):
try:
__UpperCAmelCase : Union[str, Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase : str = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase : Dict = tuple(UpperCamelCase_ )
__UpperCAmelCase : str = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__UpperCAmelCase : int = get_pairs(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = " ".join(UpperCamelCase_ )
__UpperCAmelCase : Dict = word
return word
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = []
for token in re.findall(self.pat , UpperCamelCase_ ):
__UpperCAmelCase : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(" " ) )
return bpe_tokens
def _snake_case ( self , UpperCamelCase_ ):
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _snake_case ( self , UpperCamelCase_ ):
return self.decoder.get(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = "".join(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : Any = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCAmelCase : Optional[int] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + "\n" )
__UpperCAmelCase : str = 0
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__UpperCAmelCase : str = token_index
writer.write(" ".join(UpperCamelCase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
__UpperCAmelCase : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=False , **UpperCamelCase_ ):
__UpperCAmelCase : List[str] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()):
__UpperCAmelCase : Tuple = " " + text
return (text, kwargs)
| 10 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __A (unittest.TestCase , __magic_name__ ):
def _snake_case ( self ):
__UpperCAmelCase : str = load_tool("text-classification" )
self.tool.setup()
__UpperCAmelCase : Any = load_tool("text-classification" , remote=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : Any = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(UpperCamelCase_ , "positive" )
def _snake_case ( self ):
__UpperCAmelCase : str = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(UpperCamelCase_ , "positive" )
def _snake_case ( self ):
__UpperCAmelCase : List[str] = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(UpperCamelCase_ , "positive" )
def _snake_case ( self ):
__UpperCAmelCase : Any = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(UpperCamelCase_ , "positive" )
| 708 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : int = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __A (__magic_name__ ):
snake_case :Optional[int] = "speech_to_text_2"
snake_case :List[Any] = ["past_key_values"]
snake_case :str = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase_=1_00_00 , UpperCamelCase_=6 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=0.0 , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=2_56 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=2 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=10_24 , **UpperCamelCase_ , ):
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : Tuple = decoder_ffn_dim
__UpperCAmelCase : List[str] = decoder_layers
__UpperCAmelCase : str = decoder_attention_heads
__UpperCAmelCase : Dict = dropout
__UpperCAmelCase : Optional[Any] = attention_dropout
__UpperCAmelCase : int = activation_dropout
__UpperCAmelCase : Dict = activation_function
__UpperCAmelCase : Tuple = init_std
__UpperCAmelCase : Any = decoder_layerdrop
__UpperCAmelCase : str = use_cache
__UpperCAmelCase : int = decoder_layers
__UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCAmelCase : Union[str, Any] = max_target_positions
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
| 10 | 0 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _lowercase ( *lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__=True , lowerCamelCase__=2 ) -> Any:
"""simple docstring"""
from .. import __version__
__UpperCAmelCase : Optional[Any] = take_from
__UpperCAmelCase : Any = ()
if not isinstance(args[0] , lowerCamelCase__ ):
__UpperCAmelCase : Optional[int] = (args,)
for attribute, version_name, message in args:
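# Each deprecation entry is (attribute, removal_version, message); error out if the installed version is already past removal_version.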
if version.parse(version.parse(lowerCamelCase__ ).base_version ) >= version.parse(lowerCamelCase__ ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
f""" version {__version__} is >= {version_name}""" )
__UpperCAmelCase : List[Any] = None
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowerCamelCase__ ),)
__UpperCAmelCase : List[Any] = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
values += (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
__UpperCAmelCase : Any = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
__UpperCAmelCase : Optional[int] = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
__UpperCAmelCase : Optional[int] = warning + " " if standard_warn else ""
warnings.warn(warning + message , lowerCamelCase__ , stacklevel=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
__UpperCAmelCase : str = inspect.getouterframes(inspect.currentframe() )[1]
__UpperCAmelCase : Union[str, Any] = call_frame.filename
__UpperCAmelCase : List[str] = call_frame.lineno
__UpperCAmelCase : Dict = call_frame.function
__UpperCAmelCase : Tuple = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(lowerCamelCase__ ) == 0:
return
elif len(lowerCamelCase__ ) == 1:
return values[0]
return values
| 709 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ = 100 ) -> int:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (n * (n + 1) // 2) ** 2
__UpperCAmelCase : Any = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
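# e.g. solution(10) == 3025 - 385 == 2640; note the first term is the square of the sum, (n(n+1)/2)**2, despite the sum_cubes name.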
if __name__ == "__main__":
print(f"""{solution() = }""")
| 10 | 0 |
def _lowercase ( ) -> int:
"""simple docstring"""
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
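# The only such triplet is (200, 375, 425), so this returns 31875000.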
if __name__ == "__main__":
print(f"""{solution() = }""")
| 710 | '''simple docstring'''
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
"""simple docstring"""
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
__UpperCAmelCase : Tuple = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) )
return round(lowerCamelCase__ , ndigits=2 )
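# Example: cash flows [-1000, 500, 500, 500] at a 10% rate give round(-1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3, 2) == 243.43.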
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
random.seed(lowerCamelCase__ )
np.random.seed(lowerCamelCase__ )
torch.manual_seed(lowerCamelCase__ )
torch.cuda.manual_seed_all(lowerCamelCase__ )
# ^^ safe to call this function even if cuda is not available
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = 0.9_9_9_9 , UpperCamelCase_ = 0.0 , UpperCamelCase_ = 0 , UpperCamelCase_ = False , UpperCamelCase_ = 1.0 , UpperCamelCase_ = 2 / 3 , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ):
if isinstance(UpperCamelCase_ , torch.nn.Module ):
__UpperCAmelCase : int = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , UpperCamelCase_ , standard_warn=UpperCamelCase_ , )
__UpperCAmelCase : Optional[int] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
__UpperCAmelCase : str = True
if kwargs.get("max_value" , UpperCamelCase_ ) is not None:
__UpperCAmelCase : Dict = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value" , UpperCamelCase_ ) is not None:
__UpperCAmelCase : Union[str, Any] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
__UpperCAmelCase : int = kwargs["min_value"]
__UpperCAmelCase : Union[str, Any] = list(UpperCamelCase_ )
__UpperCAmelCase : str = [p.clone().detach() for p in parameters]
if kwargs.get("device" , UpperCamelCase_ ) is not None:
__UpperCAmelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , UpperCamelCase_ , standard_warn=UpperCamelCase_ )
self.to(device=kwargs["device"] )
__UpperCAmelCase : int = None
__UpperCAmelCase : int = decay
__UpperCAmelCase : Any = min_decay
__UpperCAmelCase : Any = update_after_step
__UpperCAmelCase : int = use_ema_warmup
__UpperCAmelCase : Dict = inv_gamma
__UpperCAmelCase : Any = power
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = None # set in `step()`
__UpperCAmelCase : List[str] = model_cls
__UpperCAmelCase : Optional[int] = model_config
@classmethod
def _snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : int = model_cls.load_config(UpperCamelCase_ , return_unused_kwargs=UpperCamelCase_ )
__UpperCAmelCase : str = model_cls.from_pretrained(UpperCamelCase_ )
__UpperCAmelCase : int = cls(model.parameters() , model_cls=UpperCamelCase_ , model_config=model.config )
ema_model.load_state_dict(UpperCamelCase_ )
return ema_model
def _snake_case ( self , UpperCamelCase_ ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
__UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
__UpperCAmelCase : Any = self.state_dict()
state_dict.pop("shadow_params" , UpperCamelCase_ )
model.register_to_config(**UpperCamelCase_ )
self.copy_to(model.parameters() )
model.save_pretrained(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
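# Two decay schedules: a (1 - (1 + step / inv_gamma) ** -power) warmup ramp, or the classic (1 + step) / (10 + step) ramp.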
if self.use_ema_warmup:
__UpperCAmelCase : Optional[int] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
__UpperCAmelCase : Any = (1 + step) / (10 + step)
__UpperCAmelCase : Any = min(UpperCamelCase_ , self.decay )
# make sure decay is not smaller than min_decay
__UpperCAmelCase : int = max(UpperCamelCase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def _snake_case ( self , UpperCamelCase_ ):
if isinstance(UpperCamelCase_ , torch.nn.Module ):
__UpperCAmelCase : Optional[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , UpperCamelCase_ , standard_warn=UpperCamelCase_ , )
__UpperCAmelCase : Tuple = parameters.parameters()
__UpperCAmelCase : Optional[int] = list(UpperCamelCase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
__UpperCAmelCase : Optional[Any] = self.get_decay(self.optimization_step )
__UpperCAmelCase : int = decay
__UpperCAmelCase : int = 1 - decay
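# The in-place update below computes s_param -= (1 - decay) * (s_param - param): an exponential moving average of the weights.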
__UpperCAmelCase : Dict = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , UpperCamelCase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
__UpperCAmelCase : Dict = deepspeed.zero.GatheredParameters(UpperCamelCase_ , modifier_rank=UpperCamelCase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = list(UpperCamelCase_ )
for s_param, param in zip(self.shadow_params , UpperCamelCase_ ):
param.data.copy_(s_param.to(param.device ).data )
def _snake_case ( self , UpperCamelCase_=None , UpperCamelCase_=None ):
__UpperCAmelCase : str = [
p.to(device=UpperCamelCase_ , dtype=UpperCamelCase_ ) if p.is_floating_point() else p.to(device=UpperCamelCase_ )
for p in self.shadow_params
]
def _snake_case ( self ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : str = [param.detach().cpu().clone() for param in parameters]
def _snake_case ( self , UpperCamelCase_ ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , UpperCamelCase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
__UpperCAmelCase : str = None
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Tuple = copy.deepcopy(UpperCamelCase_ )
__UpperCAmelCase : Tuple = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
__UpperCAmelCase : Dict = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , UpperCamelCase_ ):
raise ValueError("Invalid min_decay" )
__UpperCAmelCase : Optional[Any] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , UpperCamelCase_ ):
raise ValueError("Invalid optimization_step" )
__UpperCAmelCase : List[str] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , UpperCamelCase_ ):
raise ValueError("Invalid update_after_step" )
__UpperCAmelCase : Dict = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , UpperCamelCase_ ):
raise ValueError("Invalid use_ema_warmup" )
__UpperCAmelCase : Any = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
__UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
__UpperCAmelCase : str = state_dict.get("shadow_params" , UpperCamelCase_ )
if shadow_params is not None:
__UpperCAmelCase : Union[str, Any] = shadow_params
if not isinstance(self.shadow_params , UpperCamelCase_ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(UpperCamelCase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 711 | '''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
_a : Union[str, Any] = HfApi()
_a : int = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
_a : Optional[Any] = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_a : List[str] = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("CompVis"):
_a : int = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_a : Optional[int] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_a : str = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_a : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_a : str = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 10 | 0 |
'''simple docstring'''
import os
import platform
import sys
_a : List[str] = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 712 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : List[Any] = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class __A (__magic_name__ ):
snake_case :Any = "cvt"
def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ):
super().__init__(**UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_sizes
__UpperCAmelCase : List[str] = patch_stride
__UpperCAmelCase : Tuple = patch_padding
__UpperCAmelCase : int = embed_dim
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : Any = depth
__UpperCAmelCase : List[str] = mlp_ratio
__UpperCAmelCase : List[str] = attention_drop_rate
__UpperCAmelCase : Dict = drop_rate
__UpperCAmelCase : Dict = drop_path_rate
__UpperCAmelCase : str = qkv_bias
__UpperCAmelCase : Optional[int] = cls_token
__UpperCAmelCase : Optional[Any] = qkv_projection_method
__UpperCAmelCase : Tuple = kernel_qkv
__UpperCAmelCase : Optional[Any] = padding_kv
__UpperCAmelCase : Optional[int] = stride_kv
__UpperCAmelCase : Any = padding_q
__UpperCAmelCase : List[Any] = stride_q
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Any = layer_norm_eps
| 10 | 0 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_a : List[Any] = "__DUMMY_TRANSFORMERS_USER__"
_a : str = "Dummy User"
_a : str = "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"
_a : Optional[Any] = "https://hub-ci.huggingface.co"
_a : List[str] = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
_a : List[Any] = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
_a : List[str] = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , lowerCamelCase__ )
@pytest.fixture
def _lowercase ( lowerCamelCase__ ) -> int:
"""simple docstring"""
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , lowerCamelCase__ )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , lowerCamelCase__ )
@pytest.fixture
def _lowercase ( lowerCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , lowerCamelCase__ )
@pytest.fixture
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
"""simple docstring"""
HfFolder.save_token(lowerCamelCase__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def _lowercase ( ) -> Union[str, Any]:
"""simple docstring"""
return HfApi(endpoint=lowerCamelCase__ )
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase : int = HfFolder.get_token()
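# Remember any pre-existing user token so it can be restored once the session-scoped CI token is no longer needed.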
HfFolder.save_token(lowerCamelCase__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCamelCase__ )
@pytest.fixture
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
def _cleanup_repo(lowerCamelCase__ ):
hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
@contextmanager
def _temporary_repo(lowerCamelCase__ ):
try:
yield repo_id
finally:
cleanup_repo(lowerCamelCase__ )
return _temporary_repo
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : List[str] = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
__UpperCAmelCase : int = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" , private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="data/text_data.txt" , repo_id=lowerCamelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
"""simple docstring"""
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : str = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
__UpperCAmelCase : str = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" , private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="data.zip" , repo_id=lowerCamelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
"""simple docstring"""
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : List[str] = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
__UpperCAmelCase : int = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" , private=lowerCamelCase__ )
hf_api.upload_file(
token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="data.zip" , repo_id=lowerCamelCase__ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
return hf_private_dataset_repo_zipped_img_data_
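# Added usage sketch (hedged, not part of the original conftest): how the
# factory fixtures above are meant to be consumed in a test module. The
# fixture names `temporary_repo`, `hf_api` and `hf_token` are assumptions:
# the defs in this file are obfuscated to `_lowercase`, so upstream they
# would be registered under descriptive names like these.
def test_temporary_repo_sketch(temporary_repo, hf_api, hf_token):
    repo_id = f"{CI_HUB_USER}/tmp-test-repo-{int(time.time() * 1_0e3)}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
    with temporary_repo(repo_id) as repo:
        # the repo exists inside the block and is deleted on exit
        assert repo == repo_id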
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def _lowercase ( coefficient_matrix: NDArray[float64] , constant_matrix: NDArray[float64] , init_val: list[float] , iterations: int , ) -> list[float]:
    """simple docstring"""
    rowsa , colsa = coefficient_matrix.shape
    rowsb , colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
        raise ValueError(msg )
    if colsb != 1:
        msg = f"""Constant matrix must be nx1 but received {rowsb}x{colsb}"""
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rowsa}x{colsa} and {rowsb}x{colsb}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(init_val )} and {rowsa}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table: NDArray[float64] ) -> bool:
    """simple docstring"""
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
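# Added sanity check (hedged, not in the original module): a 2x2 strictly
# diagonally dominant system 4x + y = 2, x + 3y = -1. Jacobi iterations
# should converge to the exact solution computed by numpy.
def _demo_jacobi() -> None:
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]] )
    constant = np.array([[2.0], [-1.0]] )
    approx = _lowercase(coefficient , constant , [0.0, 0.0] , 50 )
    exact = np.linalg.solve(coefficient , constant ).ravel()
    assert np.allclose(approx , exact , atol=1e-6 )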
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_a : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
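# Added note (hedged): the _LazyModule indirection above keeps the top-level
# package import cheap. A line such as
#     from transformers import GitForCausalLM
# only triggers the torch-backed import of `modeling_git` at attribute-access
# time, while the TYPE_CHECKING branch gives type checkers the eager view of
# the same symbols.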
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowercase ( number: int ) -> int:
    """simple docstring"""
    factors = prime_factors(number )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
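# Added worked examples (not in the original module): mu(1) = 1 (empty
# factorization), mu(6) = 1 (two distinct primes), mu(7) = -1 (one prime),
# mu(8) = 0 (divisible by a square).
def _demo_mobius() -> None:
    assert [_lowercase(n ) for n in (1, 6, 7, 8)] == [1, 1, -1, 0]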
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_a : Optional[Any] = logging.get_logger(__name__)
class __A (__magic_name__ ):
def __init__( self , **UpperCamelCase_ ):
requires_backends(self , ["bs4"] )
super().__init__(**UpperCamelCase_ )
    def xpath_soup ( self , element ):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single ( self , html_string ):
        html_code = BeautifulSoup(html_string , "html.parser" )
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags , xpath_subscripts = self.xpath_soup(element )
                stringaxtag_seq.append(xpath_tags )
                stringaxsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(stringaxtag_seq ):
            raise ValueError("Number of doc strings and xtags does not correspond" )
        if len(all_doc_strings ) != len(stringaxsubs_seq ):
            raise ValueError("Number of doc strings and xsubs does not correspond" )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def construct_xpath ( self , xpath_tags , xpath_subs ):
        xpath = ""
        for tagname, subs in zip(xpath_tags , xpath_subs ):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath
    def __call__( self , html_strings ):
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"""but is of type {type(html_strings )}.""" )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , stringaxtag_seq , stringaxsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
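# Added usage sketch (hedged, mirrors the upstream MarkupLMFeatureExtractor
# docs): extracting nodes and xpaths from a tiny HTML document. `__A` is this
# file's obfuscated class name; bs4 must be installed for this to run.
def _demo_feature_extractor() -> None:
    feature_extractor = __A()
    encoding = feature_extractor("<html><body><p>Hello world</p></body></html>" )
    assert encoding["nodes"] == [["Hello world"]]
    assert encoding["xpaths"] == [["/html/body/p"]]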
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Dict = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module ( module ) -> None:
    """simple docstring"""
    for param in module.parameters():
        param.requires_grad = False
def get_device ( ) -> str:
    """simple docstring"""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_image ( image ) -> None:
    """simple docstring"""
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp ( ) -> str:
    """simple docstring"""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
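# Added usage sketch (hedged): wiring the helpers above together. The Linear
# module is an illustrative stand-in for a real generator model.
def _demo_utils() -> None:
    model = torch.nn.Linear(4 , 2 )
    freeze_module(model )
    assert all(not p.requires_grad for p in model.parameters() )
    print(f"""[{get_timestamp()}] running on {get_device()}""" )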
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Any = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "ibert"
def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=False , UpperCamelCase_="none" , **UpperCamelCase_ , ):
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : List[Any] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Dict = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : Tuple = quant_mode
__UpperCAmelCase : Union[str, Any] = force_dequant
class __A (__magic_name__ ):
@property
def _snake_case ( self ):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
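# Added note (hedged): for the default task, the `inputs` property above is
# expected to yield
#     OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#                  ("attention_mask", {0: "batch", 1: "sequence"})])
# i.e. batch and sequence are the dynamic axes exported to ONNX.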
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_a : Tuple = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
_a : Dict = {
"Salesforce/codegen-350M-mono": 2048,
}
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :str = PRETRAINED_VOCAB_FILES_MAP
snake_case :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Tuple = ["input_ids", "attention_mask"]
snake_case :Dict = CodeGenTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
if kwargs.pop("add_bos_token" , UpperCamelCase_ ):
__UpperCAmelCase : int = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
__UpperCAmelCase : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_ ) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type" ) )
__UpperCAmelCase : Optional[int] = add_prefix_space
__UpperCAmelCase : Tuple = pre_tok_class(**UpperCamelCase_ )
__UpperCAmelCase : Tuple = add_prefix_space
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__UpperCAmelCase : Any = kwargs.get("is_split_into_words" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
    def decode ( self , token_ids , skip_special_tokens = False , clean_up_tokenization_spaces = None , truncate_before_pattern = None , **kwargs , ):
        decoded_text = super().decode(
            token_ids=token_ids , skip_special_tokens=skip_special_tokens , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        if truncate_before_pattern is not None and len(truncate_before_pattern ) > 0:
            decoded_text = self.truncate(decoded_text , truncate_before_pattern )
        return decoded_text
    def truncate ( self , completion , truncate_before_pattern ):
        def find_re(string , pattern , start_pos ):
            m = pattern.search(string , start_pos )
            return m.start() if m else -1
        terminals = [re.compile(pattern , re.MULTILINE ) for pattern in truncate_before_pattern]
        prints = list(re.finditer("^print" , completion , re.MULTILINE ) )
        if len(prints ) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("^def" , completion , re.MULTILINE ) )
        if len(defs ) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion , terminal , start_pos ) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos ) > 0:
            return completion[: min(terminals_pos )]
        else:
            return completion
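# Added usage sketch (hedged): how truncate() prunes a completion. Loading
# the checkpoint needs network access; the expected string below was traced
# by hand through the print-pruning and regex logic above.
def _demo_codegen_truncation() -> None:
    tokenizer = __A.from_pretrained("Salesforce/codegen-350M-mono" )
    completion = "a = 1\nprint(a)\n# done\nprint(a)\n"
    assert tokenizer.truncate(completion , ["^#"] ) == "a = 1\nprint(a)\n"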
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _lowercase ( ) -> None:
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    benchmark.run()
if __name__ == "__main__":
main()
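# Added usage note (hedged): a typical shell invocation, assuming this script
# is saved as run_benchmark_tf.py (file name assumed here):
#     python run_benchmark_tf.py --models bert-base-uncased \
#         --batch_sizes 8 --sequence_lengths 128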
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_a : str = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __A (__magic_name__ ):
snake_case :Optional[Any] = VOCAB_FILES_NAMES
snake_case :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case :Dict = ["input_ids", "attention_mask"]
snake_case :List[int] = []
snake_case :List[int] = []
def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_=None , UpperCamelCase_=False , **UpperCamelCase_ , ):
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
__UpperCAmelCase : Any = legacy_behaviour
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__UpperCAmelCase : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCAmelCase : List[Any] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCAmelCase : str = 1
__UpperCAmelCase : Union[str, Any] = len(self.sp_model )
__UpperCAmelCase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase_ )
}
__UpperCAmelCase : Optional[Any] = {v: k for k, v in self.lang_code_to_id.items()}
__UpperCAmelCase : Optional[int] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__UpperCAmelCase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__UpperCAmelCase : Any = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__UpperCAmelCase : Optional[Any] = src_lang if src_lang is not None else "eng_Latn"
__UpperCAmelCase : Optional[int] = self.lang_code_to_id[self._src_lang]
__UpperCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
__UpperCAmelCase : int = self.__dict__.copy()
__UpperCAmelCase : str = None
__UpperCAmelCase : Any = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , UpperCamelCase_ ):
__UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__UpperCAmelCase : Dict = {}
__UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
return self._src_lang
@src_lang.setter
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = [1] * len(self.prefix_tokens )
__UpperCAmelCase : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCamelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCamelCase_ )) + ([0] * len(UpperCamelCase_ )) + suffix_ones
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
__UpperCAmelCase : Optional[Any] = [self.sep_token_id]
__UpperCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
__UpperCAmelCase : Union[str, Any] = src_lang
__UpperCAmelCase : str = self(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.convert_tokens_to_ids(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = tgt_lang_id
return inputs
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , UpperCamelCase_ ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCAmelCase : Union[str, Any] = self.sp_model.PieceToId(UpperCamelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , UpperCamelCase_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def _snake_case ( self , UpperCamelCase_ ):
        out_string = "".join(UpperCamelCase_ ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase : List[str] = os.path.join(
UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , "wb" ) as fi:
__UpperCAmelCase : Dict = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = "eng_Latn" , UpperCamelCase_ = None , UpperCamelCase_ = "fra_Latn" , **UpperCamelCase_ , ):
__UpperCAmelCase : Tuple = src_lang
__UpperCAmelCase : Dict = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__UpperCAmelCase : Dict = [self.cur_lang_code]
__UpperCAmelCase : Any = [self.eos_token_id]
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
__UpperCAmelCase : Tuple = [self.cur_lang_code]
__UpperCAmelCase : Any = [self.eos_token_id]
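# Added usage sketch (hedged, mirrors upstream NLLB usage): preparing a
# translation batch. Downloading the checkpoint needs network access; `__A`
# is this file's obfuscated tokenizer class name, and the attribute bindings
# in __init__ are assumed to resolve as in the upstream implementation.
def _demo_nllb() -> None:
    tokenizer = __A.from_pretrained(
        "facebook/nllb-200-distilled-600M" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
    inputs = tokenizer("Hello world" , return_tensors="pt" )
    # With legacy_behaviour=False the source language code is prepended.
    assert inputs["input_ids"][0][0] == tokenizer.lang_code_to_id["eng_Latn"]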
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape ( tensor_list ) -> bool:
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
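# Added sanity check (not in the original test file): ragged batches fail.
def _demo_check_same_shape() -> None:
    assert check_same_shape([torch.zeros(2 , 3 ), torch.zeros(2 , 3 )] )
    assert not check_same_shape([torch.zeros(2 , 3 ), torch.zeros(3 , 2 )] )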
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
def _snake_case ( self ):
torch.manual_seed(0 )
__UpperCAmelCase : List[str] = UNetaDConditionModel(
act_fn="gelu" , attention_head_dim=8 , norm_num_groups=UpperCamelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
) , in_channels=8 , mid_block_type=UpperCamelCase_ , only_cross_attention=UpperCamelCase_ , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
__UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
__UpperCAmelCase : Optional[int] = EulerDiscreteScheduler(prediction_type="sample" )
__UpperCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="quick_gelu" , projection_dim=5_12 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_ )
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__UpperCAmelCase : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
if str(UpperCamelCase_ ).startswith("mps" ):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_ )
else:
__UpperCAmelCase : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__UpperCAmelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
__UpperCAmelCase : List[str] = "cpu"
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Tuple = self.pipeline_class(**UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__UpperCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ )
__UpperCAmelCase : int = pipe(**UpperCamelCase_ ).images
__UpperCAmelCase : Any = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
__UpperCAmelCase : Tuple = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
__UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase_ , 1E-3 )
def _snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _snake_case ( self ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _snake_case ( self ):
super().test_save_load_local(expected_max_difference=3E-3 )
def _snake_case ( self ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _snake_case ( self ):
        skip_schedulers = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = 2
        outputs = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class __A (unittest.TestCase ):
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = torch.manual_seed(33 )
__UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.floataa )
pipe.to("cuda" )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[int] = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
__UpperCAmelCase : Any = pipe(UpperCamelCase_ , generator=UpperCamelCase_ , output_type="latent" ).images
__UpperCAmelCase : int = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = torch.manual_seed(33 )
__UpperCAmelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.floataa )
upscaler.to("cuda" )
__UpperCAmelCase : Optional[Any] = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
__UpperCAmelCase : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
__UpperCAmelCase : Dict = upscaler(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , num_inference_steps=20 , guidance_scale=0 , generator=UpperCamelCase_ , output_type="np" , ).images[0]
__UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : Tuple = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __A (__magic_name__ ):
snake_case :Union[str, Any] = "codegen"
snake_case :List[str] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , UpperCamelCase_=5_04_00 , UpperCamelCase_=20_48 , UpperCamelCase_=20_48 , UpperCamelCase_=40_96 , UpperCamelCase_=28 , UpperCamelCase_=16 , UpperCamelCase_=64 , UpperCamelCase_=None , UpperCamelCase_="gelu_new" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0_2 , UpperCamelCase_=True , UpperCamelCase_=5_02_56 , UpperCamelCase_=5_02_56 , UpperCamelCase_=False , **UpperCamelCase_ , ):
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : str = n_ctx
__UpperCAmelCase : Optional[int] = n_positions
__UpperCAmelCase : Dict = n_embd
__UpperCAmelCase : Tuple = n_layer
__UpperCAmelCase : Tuple = n_head
__UpperCAmelCase : List[str] = n_inner
__UpperCAmelCase : Union[str, Any] = rotary_dim
__UpperCAmelCase : Union[str, Any] = activation_function
__UpperCAmelCase : List[Any] = resid_pdrop
__UpperCAmelCase : str = embd_pdrop
__UpperCAmelCase : Optional[int] = attn_pdrop
__UpperCAmelCase : Optional[int] = layer_norm_epsilon
__UpperCAmelCase : str = initializer_range
__UpperCAmelCase : str = use_cache
__UpperCAmelCase : Dict = bos_token_id
__UpperCAmelCase : Any = eos_token_id
super().__init__(
bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , tie_word_embeddings=UpperCamelCase_ , **UpperCamelCase_ )
class __A (__magic_name__ ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = "default" , UpperCamelCase_ = None , UpperCamelCase_ = False , ):
super().__init__(UpperCamelCase_ , task=UpperCamelCase_ , patching_specs=UpperCamelCase_ , use_past=UpperCamelCase_ )
if not getattr(self._config , "pad_token_id" , UpperCamelCase_ ):
# TODO: how to do that better?
__UpperCAmelCase : Tuple = 0
@property
def _snake_case ( self ):
__UpperCAmelCase : str = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction="inputs" )
__UpperCAmelCase : List[str] = {0: "batch", 1: "past_sequence + sequence"}
else:
__UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , ):
        common_inputs = super(UpperCamelCase_ , self ).generate_dummy_inputs(
            UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
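# Added note (hedged): with use_past=True, each dummy key/value tensor built
# above has shape
#     (batch, n_head, seqlen + 2, n_embd // n_head)
# and the attention mask is widened by the same past length, so cached and
# fresh positions are both attended to during export.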
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
    def _consolidate ( self , column ):
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize ( self , value ):
        import torch
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize ( self , data_struct ):
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize ( self , data_struct ):
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
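# Added usage sketch (hedged): this formatter is what backs
# `Dataset.with_format("torch")` in `datasets`.
def _demo_torch_format() -> None:
    import torch
    from datasets import Dataset
    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]} ).with_format("torch" )
    assert ds[0]["x"].dtype == torch.int64  # int64 per the default mapping above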
'''simple docstring'''
import os
def _lowercase ( lowerCamelCase__ = "matrix.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as in_file:
__UpperCAmelCase : str = in_file.read()
__UpperCAmelCase : Union[str, Any] = [[int(lowerCamelCase__ ) for cell in row.split("," )] for row in data.strip().splitlines()]
__UpperCAmelCase : Any = [[0 for cell in row] for row in grid]
__UpperCAmelCase : List[str] = len(grid[0] )
__UpperCAmelCase : Any = [[0 for i in range(lowerCamelCase__ )] for j in range(lowerCamelCase__ )]
__UpperCAmelCase : str = grid[0][0]
for i in range(1 , lowerCamelCase__ ):
__UpperCAmelCase : int = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowerCamelCase__ ):
__UpperCAmelCase : Optional[Any] = grid[i][0] + dp[i - 1][0]
for i in range(1 , lowerCamelCase__ ):
for j in range(1 , lowerCamelCase__ ):
__UpperCAmelCase : Any = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
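# Added example (hedged, not in the original solution): the same right/down
# DP applied to a tiny literal grid. Minimum path 1 -> 3 -> 1 -> 1 -> 1
# costs 7, worked out by hand.
def _demo_min_path() -> None:
    grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    n = len(grid )
    dp = [row[:] for row in grid]
    for i in range(n ):
        for j in range(n ):
            if i == 0 and j == 0:
                continue
            candidates = []
            if i > 0:
                candidates.append(dp[i - 1][j] )
            if j > 0:
                candidates.append(dp[i][j - 1] )
            dp[i][j] += min(candidates )
    assert dp[-1][-1] == 7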
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
def valid_coloring ( neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color ( graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    """simple docstring"""
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def _lowercase ( graph: list[list[int]] , max_colors: int ) -> list[int]:
    """simple docstring"""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
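# Added usage example (hedged): the 5-vertex adjacency matrix below is
# illustrative, not from the original module. Three colors suffice; a single
# color cannot separate adjacent vertices, so the search returns [].
def _demo_coloring() -> None:
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    assert _lowercase(demo_graph , 3 ) != []
    assert _lowercase(demo_graph , 1 ) == []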
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __A :
snake_case :int
snake_case :Node | None = None
snake_case :Node | None = None
def make_tree ( ) -> Node | None:
    """simple docstring"""
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder ( root: Node | None ) -> list[int]:
    """simple docstring"""
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder ( root: Node | None ) -> list[int]:
    """simple docstring"""
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder ( root: Node | None ) -> list[int]:
    """simple docstring"""
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height ( root: Node | None ) -> int:
    """simple docstring"""
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order ( root: Node | None ) -> Sequence[Node | None]:
    """simple docstring"""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right ( root: Node | None , level: int ) -> Sequence[Node | None]:
    """simple docstring"""
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left ( root: Node | None , level: int ) -> Sequence[Node | None]:
    """simple docstring"""
    output: list[Any] = []
    def populate_output(root: Node | None , level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag(root: Node | None ) -> Sequence[Node | None] | list[Any]:
    """Level-order traversal that alternates direction on every level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
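# Editor's note (added): with the sample tree from make_tree() above,
#   level_order(tree) -> [1, 2, 3, 4, 5]
#   zigzag(tree)      -> [[1], [3, 2], [4, 5]]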
def main() -> None:  # Main function for testing.
    """Run every traversal on the sample tree and print the results."""
    tree = make_tree()
    print(f"""In-order Traversal: {inorder(tree )}""" )
    print(f"""Pre-order Traversal: {preorder(tree )}""" )
    print(f"""Post-order Traversal: {postorder(tree )}""" , "\n" )
    print(f"""Height of Tree: {height(tree )}""" , "\n" )
    print("Complete Level Order Traversal: " )
    print(level_order(tree ) , "\n" )
    print("Level-wise order Traversal: " )
    for level in range(1 , height(tree ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(tree , level=level ) )
    print("\nZigZag order Traversal: " )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 721 | '''Single-bit manipulation helpers: set, clear, flip, and test a bit.'''
def set_bit(number: int , position: int ) -> int:
    """Set (turn on) the bit of ``number`` at ``position``."""
return number | (1 << position)
def clear_bit(number: int , position: int ) -> int:
    """Clear (turn off) the bit of ``number`` at ``position``."""
return number & ~(1 << position)
def flip_bit(number: int , position: int ) -> int:
    """Flip the bit of ``number`` at ``position``."""
return number ^ (1 << position)
def is_bit_set(number: int , position: int ) -> bool:
    """Return True if the bit of ``number`` at ``position`` is set."""
return ((number >> position) & 1) == 1
def get_bit(number: int , position: int ) -> int:
    """Return the bit value (0 or 1) of ``number`` at ``position``."""
return int((number & (1 << position)) != 0 )
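# Editor's examples (added), matching the helpers above:
#   set_bit(0b1101, 1)    -> 15  (0b1111)
#   clear_bit(0b1111, 1)  -> 13  (0b1101)
#   flip_bit(0b1101, 1)   -> 15  (0b1111)
#   is_bit_set(0b1010, 3) -> True
#   get_bit(0b1010, 1)    -> 1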
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''Build short, collision-free names for hyperparameter trials from their non-default parameters.'''
import copy
import re
class TrialShortNamer :
lowerCAmelCase_ = 'hp'
lowerCAmelCase_ = {}
lowerCAmelCase_ = None
@classmethod
def lowerCamelCase_ ( cls : Any,__A : Tuple,__A : Dict ):
_lowerCamelCase : Optional[Any] = prefix
_lowerCamelCase : int = defaults
cls.build_naming_info()
@staticmethod
def lowerCamelCase_ ( __A : List[str],__A : Tuple ):
if len(__A ) == 0:
return ""
_lowerCamelCase : Any = None
if any(char.isdigit() for char in word ):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1,len(__A ) + 1 ):
_lowerCamelCase : List[str] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_lowerCamelCase : Optional[int] = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(__A : Tuple ):
_lowerCamelCase : Dict = ""
while integer != 0:
_lowerCamelCase : Optional[int] = chr(ord("A" ) + integer % 1_0 ) + s
integer //= 1_0
return s
_lowerCamelCase : Any = 0
while True:
_lowerCamelCase : Optional[int] = word + "#" + int_to_alphabetic(__A )
if sword in info["reverse_short_word"]:
continue
else:
_lowerCamelCase : Dict = sword
break
_lowerCamelCase : Dict = short_word
_lowerCamelCase : List[Any] = word
return short_word
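    # (added) Scheme: each word maps to its shortest prefix not already taken;
    # if every prefix collides, fall back to word + "#" + an alphabetic counter
    # built by int_to_alphabetic above.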
@staticmethod
def lowerCamelCase_ ( __A : str,__A : List[str] ):
_lowerCamelCase : Tuple = param_name.split("_" )
_lowerCamelCase : List[Any] = [TrialShortNamer.shortname_for_word(__A,__A ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_lowerCamelCase : List[str] = ["", "_"]
for separator in separators:
_lowerCamelCase : int = separator.join(__A )
if shortname not in info["reverse_short_param"]:
_lowerCamelCase : List[Any] = shortname
_lowerCamelCase : Any = param_name
return shortname
return param_name
@staticmethod
def lowerCamelCase_ ( __A : Tuple,__A : Dict ):
_lowerCamelCase : Optional[int] = TrialShortNamer.shortname_for_key(__A,__A )
_lowerCamelCase : Dict = short_name
_lowerCamelCase : List[Any] = param_name
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any] ):
if cls.NAMING_INFO is not None:
return
_lowerCamelCase : Dict = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
_lowerCamelCase : List[Any] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__A,__A )
_lowerCamelCase : Dict = info
@classmethod
def lowerCamelCase_ ( cls : List[str],__A : Any ):
cls.build_naming_info()
assert cls.PREFIX is not None
_lowerCamelCase : str = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_lowerCamelCase : Tuple = cls.NAMING_INFO["short_param"][k]
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = 1 if v else 0
_lowerCamelCase : Any = "" if isinstance(__A,(int, float) ) else "-"
_lowerCamelCase : int = f'{key}{sep}{v}'
name.append(__A )
return "_".join(__A )
@classmethod
def lowerCamelCase_ ( cls : Tuple,__A : Optional[Any] ):
_lowerCamelCase : int = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_lowerCamelCase : int = []
else:
_lowerCamelCase : List[str] = repr.split("_" )
_lowerCamelCase : Union[str, Any] = {}
for value in values:
if "-" in value:
_lowerCamelCase , _lowerCamelCase : Optional[Any] = value.split("-" )
else:
_lowerCamelCase : Optional[int] = re.sub("[0-9.]","",__A )
_lowerCamelCase : Optional[Any] = float(re.sub("[^0-9.]","",__A ) )
_lowerCamelCase : Tuple = cls.NAMING_INFO["reverse_short_param"][p_k]
_lowerCamelCase : Any = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_lowerCamelCase : Any = cls.DEFAULTS[k]
return parameters | 11 |
'''
Project Euler Problem 86 (https://projecteuler.net/problem=86): find the least
M such that the number of cuboids a <= b <= c <= M whose shortest surface path
is an integer first exceeds ``limit``.
'''
from math import sqrt


def solution(limit: int = 1000000 ) -> int:
    """Return the least maximum cuboid side M with more than ``limit`` solutions."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
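# Editor's addition: a brute-force cross-check of the counting step above.
# ``_brute_force_count`` is a hypothetical helper (not in the original sample);
# it counts cuboids a <= b <= c <= max_m whose shortest surface path
# sqrt((a + b) ** 2 + c ** 2) has integer length.
def _brute_force_count(max_m: int ) -> int:
    from math import isqrt

    total = 0
    for c in range(1 , max_m + 1 ):
        for a in range(1 , c + 1 ):
            for b in range(a , c + 1 ):
                d2 = (a + b) ** 2 + c**2
                if isqrt(d2 ) ** 2 == d2:
                    total += 1
    return total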
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
'''Diffie-Hellman key exchange over the RFC 3526 MODP groups defined below.'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Diffie-Hellman key exchange over one of the RFC 3526 MODP groups above."""

    def __init__( self , group: int = 1_4 ) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group" )
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(3_2 ) ) , base=1_6 )

    def get_private_key( self ) -> str:
        return hex(self.__private_key )[2:]

    def generate_public_key( self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key( self , key: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def generate_shared_key( self , other_key_str: str ) -> str:
        other_key = int(other_key_str , base=1_6 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("Invalid public key" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static( remote_public_key_str: int , prime: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static( local_private_key_str: str , remote_public_key_str: str , group: int = 1_4 ) -> str:
        local_private_key = int(local_private_key_str , base=1_6 )
        remote_public_key = int(remote_public_key_str , base=1_6 )
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("Invalid public key" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
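# Editor's sketch (added): the exchange round-trip with a toy group so it runs
# instantly. The tiny prime is for illustration only; real use must stick to
# the RFC 3526 groups above. All names below are illustrative.
def _toy_exchange_demo() -> None:
    from random import randrange

    p, g = 23, 5  # deliberately insecure demo parameters
    a, b = randrange(2 , p - 1 ), randrange(2 , p - 1 )  # private keys
    pub_a, pub_b = pow(g , a , p ), pow(g , b , p )  # public keys
    shared_a, shared_b = pow(pub_b , a , p ), pow(pub_a , b , p )
    assert shared_a == shared_b  # both sides derive the same secret
    _ = sha256(str(shared_a ).encode() ).hexdigest()  # session key material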
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''Convert an integer to a ``0b``-prefixed binary string (matches ``bin``).'''


def decimal_to_binary(num: int ) -> str:
    """Return the binary representation of ``num`` as a string."""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 1 |
'''Tests for accelerate's model hooks: forward hooks, chaining, and device alignment.'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
def __init__( self : str ):
super().__init__()
_lowerCamelCase : Union[str, Any] = nn.Linear(3,4 )
        _lowerCamelCase : Optional[Any] = nn.BatchNorm1d(4 )
_lowerCamelCase : Dict = nn.Linear(4,5 )
def lowerCamelCase_ ( self : int,__A : Optional[Any] ):
return self.lineara(self.batchnorm(self.lineara(__A ) ) )
class PreForwardHook( ModelHook ):
def lowerCamelCase_ ( self : Optional[Any],__A : List[str],*__A : List[Any],**__A : str ):
return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Union[str, Any] ):
return output + 1
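# Editor's sketch (added): minimal standalone use of the hook machinery the
# tests below exercise. It relies only on accelerate's public API as imported
# above; the AddOne hook and the layer are illustrative.
def _hook_demo() -> None:
    class AddOne(ModelHook ):
        def pre_forward( self , module , *args , **kwargs ):
            return (args[0] + 1,) + args[1:], kwargs

    layer = nn.Linear(3 , 4 )
    add_hook_to_module(layer , AddOne() )  # forward now sees x + 1
    _ = layer(torch.randn(2 , 3 ) )
    remove_hook_from_module(layer )  # restores the original forward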
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[Any] = ModelForTest()
_lowerCamelCase : int = ModelHook()
add_hook_to_module(__A,__A )
self.assertEqual(test_model._hf_hook,__A )
self.assertTrue(hasattr(__A,"_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__,"forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ),["x"] )
remove_hook_from_module(__A )
self.assertFalse(hasattr(__A,"_hf_hook" ) )
self.assertFalse(hasattr(__A,"_old_forward" ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = ModelForTest()
_lowerCamelCase : Any = ModelHook()
add_hook_to_module(__A,__A )
add_hook_to_module(__A,__A,append=__A )
self.assertEqual(isinstance(test_model._hf_hook,__A ),__A )
self.assertEqual(len(test_model._hf_hook.hooks ),2 )
self.assertTrue(hasattr(__A,"_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__,"forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ),["x"] )
remove_hook_from_module(__A )
self.assertFalse(hasattr(__A,"_hf_hook" ) )
self.assertFalse(hasattr(__A,"_old_forward" ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = ModelForTest()
_lowerCamelCase : Tuple = torch.randn(2,3 )
_lowerCamelCase : Union[str, Any] = test_model(x + 1 )
_lowerCamelCase : int = test_model(x + 2 )
_lowerCamelCase : Union[str, Any] = PreForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : str = test_model(__A )
self.assertTrue(torch.allclose(__A,__A,atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_lowerCamelCase : List[str] = PreForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : List[Any] = test_model(__A )
self.assertTrue(torch.allclose(__A,__A,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCamelCase : Any = SequentialHook(PreForwardHook(),PreForwardHook() )
add_hook_to_module(__A,__A )
_lowerCamelCase : Any = test_model(__A )
assert torch.allclose(__A,__A,atol=1e-5 )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = ModelForTest()
_lowerCamelCase : Optional[int] = torch.randn(2,3 )
_lowerCamelCase : Optional[Any] = test_model(__A )
_lowerCamelCase : List[Any] = PostForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : str = test_model(__A )
self.assertTrue(torch.allclose(__A,output + 1,atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_lowerCamelCase : int = PostForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : Any = test_model(__A )
self.assertTrue(torch.allclose(__A,output + 1,atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook(),PostForwardHook() )
add_hook_to_module(__A,__A )
_lowerCamelCase : Tuple = test_model(__A )
assert torch.allclose(__A,output + 2,atol=1e-5 )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Union[str, Any] = ModelForTest()
_lowerCamelCase : List[Any] = torch.randn(2,3 )
_lowerCamelCase : Union[str, Any] = test_model(__A )
_lowerCamelCase : Optional[int] = PostForwardHook()
add_hook_to_module(__A,__A )
_lowerCamelCase : Optional[Any] = test_model(__A )
self.assertTrue(torch.allclose(__A,output + 1 ) )
self.assertTrue(outputa.requires_grad )
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[Any] = test_model(__A )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : str = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm,AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara,AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device,torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device,torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device,torch.device(0 ) )
self.assertEqual(model.lineara.weight.device,torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_lowerCamelCase : Optional[Any] = torch.randn(2,3 )
_lowerCamelCase : Dict = model(__A )
self.assertEqual(output.device,torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__A,AlignDevicesHook(io_same_device=__A ) )
_lowerCamelCase : str = torch.randn(2,3 ).to(0 )
_lowerCamelCase : Any = model(__A )
self.assertEqual(output.device,torch.device(0 ) )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# This will move each submodule on different devices
_lowerCamelCase : Optional[Any] = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara,AlignDevicesHook(**__A ) )
add_hook_to_module(model.batchnorm,AlignDevicesHook(**__A ) )
add_hook_to_module(model.lineara,AlignDevicesHook(**__A ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : Dict = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device,__A )
_lowerCamelCase : int = torch.randn(2,3 )
_lowerCamelCase : Optional[int] = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# Now test with buffers included in the offload
_lowerCamelCase : Union[str, Any] = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara,AlignDevicesHook(**__A ) )
add_hook_to_module(model.batchnorm,AlignDevicesHook(**__A ) )
add_hook_to_module(model.lineara,AlignDevicesHook(**__A ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device,torch.device("meta" ) )
_lowerCamelCase : Dict = torch.randn(2,3 )
_lowerCamelCase : List[Any] = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# This will move each submodule on different devices
_lowerCamelCase : Union[str, Any] = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(__A,execution_device=__A,offload=__A )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : Optional[int] = torch.device(__A )
self.assertEqual(model.batchnorm.running_mean.device,__A )
_lowerCamelCase : Optional[int] = torch.randn(2,3 )
_lowerCamelCase : Any = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__A,execution_device=__A,offload=__A,offload_buffers=__A )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device,torch.device("meta" ) )
_lowerCamelCase : Optional[int] = torch.randn(2,3 )
_lowerCamelCase : int = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# This will move each submodule on different devices
_lowerCamelCase : Tuple = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
__A,execution_device=__A,offload=__A,weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCamelCase : Dict = torch.device(__A )
self.assertEqual(model.batchnorm.running_mean.device,__A )
_lowerCamelCase : Optional[Any] = torch.randn(2,3 )
_lowerCamelCase : List[Any] = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__A,execution_device=__A,offload=__A,weights_map=model.state_dict(),offload_buffers=__A,)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device,torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device,torch.device("meta" ) )
_lowerCamelCase : List[Any] = torch.randn(2,3 )
_lowerCamelCase : Optional[int] = model(__A )
self.assertEqual(output.device,__A )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__A )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device,torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device,torch.device("cpu" ) ) | 11 |
'''Shap-E image-to-3D pipeline: an image-conditioned prior plus a NeRF renderer.'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
class ShapEImg2ImgPipeline( DiffusionPipeline ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
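                # (added) classifier-free guidance: extrapolate from the
                # unconditional prediction toward the image-conditioned one.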
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 1 |
'''Deprecated DeiTFeatureExtractor, kept as an alias of DeiTImageProcessor.'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
class DeiTFeatureExtractor( DeiTImageProcessor ):
def __init__( self : List[Any],*__A : List[Any],**__A : int ):
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead.",__A,)
super().__init__(*__A,**__A ) | 11 |
'''A simple random shuffle: a Fisher-Yates-style swap loop.'''
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> list:
    """Shuffle ``data`` in place by swapping two random positions per pass."""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
return data
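# Editor's note (added): this variant swaps two uniformly random positions on
# every pass; unlike the textbook Fisher-Yates shuffle (which swaps index i
# with a random j <= i), it is not guaranteed to produce a uniform permutation.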
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 1 |
'''Tokenization tests for the CTRL BPE tokenizer.'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : Dict = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
_lowerCamelCase : List[str] = dict(zip(__A,range(len(__A ) ) ) )
_lowerCamelCase : Union[str, Any] = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
_lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
_lowerCamelCase : Dict = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens ) | 11
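# Editor's note (added, refers to the CTRL test above): "adapt" survives whole
# because the merge list builds it up (a+p -> ap, ap+t</w> -> apt</w>,
# a+d -> ad, ad+apt</w> -> adapt</w>), while "react" only matches r+e -> re,
# leaving the pieces re@@ a@@ c@@ t.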
'''Flax RoBERTa model tests.'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest( FlaxModelTesterMixin , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
        self.model_tester = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=__A )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A ) | 11 | 1 |
'''Dummy placeholder objects that raise informative errors when torch, transformers, or onnx are unavailable.'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple,*__A : int,**__A : Tuple ):
requires_backends(self,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],*__A : Tuple,**__A : Optional[int] ):
requires_backends(cls,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : List[str],*__A : Optional[int],**__A : Union[str, Any] ):
requires_backends(cls,["torch", "transformers", "onnx"] )
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any],*__A : Optional[Any],**__A : Any ):
requires_backends(self,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : int,*__A : Optional[int],**__A : Optional[int] ):
requires_backends(cls,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : int,*__A : List[str],**__A : List[str] ):
requires_backends(cls,["torch", "transformers", "onnx"] )
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : int,*__A : Any,**__A : Optional[int] ):
requires_backends(self,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : int,*__A : Optional[int],**__A : List[Any] ):
requires_backends(cls,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : List[str],*__A : Tuple,**__A : str ):
requires_backends(cls,["torch", "transformers", "onnx"] )
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : str,*__A : Any,**__A : Optional[Any] ):
requires_backends(self,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : Tuple,**__A : Dict ):
requires_backends(cls,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],*__A : List[str],**__A : Optional[int] ):
requires_backends(cls,["torch", "transformers", "onnx"] )
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any],*__A : str,**__A : Dict ):
requires_backends(self,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],*__A : Dict,**__A : str ):
requires_backends(cls,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : List[str],*__A : int,**__A : Optional[int] ):
requires_backends(cls,["torch", "transformers", "onnx"] )
class UpperCAmelCase__ ( metaclass=A ):
lowerCAmelCase_ = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int],*__A : Union[str, Any],**__A : int ):
requires_backends(self,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : Optional[Any],*__A : List[Any],**__A : Dict ):
requires_backends(cls,["torch", "transformers", "onnx"] )
@classmethod
def lowerCamelCase_ ( cls : List[str],*__A : str,**__A : int ):
requires_backends(cls,["torch", "transformers", "onnx"] ) | 11 |
'''Fast (byte-level BPE) tokenizer for BlenderbotSmall.'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast( PreTrainedTokenizerFast ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 1 |
'''PyTorch ALBERT model tests.'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
def __init__( self : List[Any],__A : int,__A : List[str]=1_3,__A : Any=7,__A : Dict=True,__A : Dict=True,__A : int=True,__A : Optional[Any]=True,__A : Dict=9_9,__A : List[str]=1_6,__A : Dict=3_6,__A : List[Any]=6,__A : Optional[int]=6,__A : str=6,__A : Any=3_7,__A : List[str]="gelu",__A : Union[str, Any]=0.1,__A : Union[str, Any]=0.1,__A : Optional[Any]=5_1_2,__A : int=1_6,__A : List[str]=2,__A : List[Any]=0.02,__A : Optional[int]=3,__A : str=4,__A : Dict=None,):
_lowerCamelCase : Tuple = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Any = seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : List[Any] = use_token_type_ids
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Union[str, Any] = embedding_size
_lowerCamelCase : Any = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : Any = num_hidden_groups
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Dict = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : Optional[int] = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : List[str] = num_labels
_lowerCamelCase : List[Any] = num_choices
_lowerCamelCase : Any = scope
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_input_mask:
_lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[int] = None
if self.use_token_type_ids:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Tuple = None
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Optional[int] ):
return AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,num_hidden_groups=self.num_hidden_groups,)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) | 11 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """
    Helper function for reproducible behavior: sets the seed in `random`, `numpy` and `torch`.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """
        Compute the decay factor for the exponential moving average.
        """
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
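    # Illustrative note (added, not part of the original file): with the warmup
    # schedule decay = 1 - (1 + step / inv_gamma) ** -power and the default
    # inv_gamma=1.0, power=2/3, the EMA decay ramps up smoothly before being
    # clamped into [min_decay, decay]:
    #   step=1    -> 1 - 2 ** (-2 / 3)    ~= 0.370
    #   step=10   -> 1 - 11 ** (-2 / 3)   ~= 0.798
    #   step=1000 -> 1 - 1001 ** (-2 / 3) ~= 0.990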
@torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the internal buffers of the ExponentialMovingAverage to `device`."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily store the current parameters so they can be restored after an EMA evaluation."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters stored with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        """Load the ExponentialMovingAverage state (as returned by `state_dict`)."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors") | 11 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1,
        activation: Optional[str] = "relu", **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID",
            groups=groups, use_bias=False, name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2,
            activation=config.hidden_act, name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
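# (Comment added for clarity, not part of the original file: the layer above is a
# squeeze-and-excitation block. The input is global-average-pooled to one value
# per channel, squeezed through a ReLU 1x1 conv, expanded back with a sigmoid
# 1x1 conv, and the resulting [0, 1] weights rescale the input channels.)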
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = R'\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'

REGNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states) | 11 |
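# Minimal usage sketch for the model above (illustrative only, not part of the
# original file; the checkpoint name is an assumption and `image` is any PIL image):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   logits = model(**inputs).logits
#   predicted_label = int(tf.math.argmax(logits, axis=-1))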
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a fairseq/metaseq checkpoint and normalize its state dict keys."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
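# Illustrative example (added, not part of the original script): a fused
# projection such as "decoder.layers.0.self_attn.qkv_proj.weight" with shape
# (3 * hidden, hidden) is split above into three (hidden, hidden) tensors that
# are stored back under the ".q_proj.", ".k_proj." and ".v_proj." keys.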
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the fairseq weights into the Transformers OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 1 |
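# Example invocation (hypothetical file names/paths, not part of the original script):
#   python convert_opt_checkpoint.py \
#       --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path ./opt-hf \
#       --hf_config facebook/opt-350m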
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Patched module that temporarily keeps the original attributes of a module."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at their original state."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    (isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule)
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__() | 11 |
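# Minimal usage sketch (illustrative only, not part of the original file;
# `mymodule` is any module that imported `os.path.join`):
#
#   def mock_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(mymodule, "os.path.join", mock_join):
#       ...  # mymodule sees mock_join, however it imported os.path.join
#   # on __exit__ the original attributes are restored;
#   # patch.start() / patch.stop() offer the same behavior outside a `with` block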
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """Rename a key from the original GroupViT checkpoint to the Transformers naming."""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    """Load a COCO image of two cats to verify the conversion on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the original GroupViT weights into the Transformers design."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(F'Model name {model_name} not supported.')
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 1 |
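# Example invocation (hypothetical checkpoint path/file names, not part of the
# original script):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path ./group_vit_gcc_yfcc.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc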
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it,
        # so we set lstrip to True
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
        return input_ids | 11 |
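# Minimal usage sketch (illustrative only, not part of the original file):
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer(" Hello, how are you?").input_ids
#   # build_inputs_with_special_tokens only appends EOS, so `ids` ends with
#   # tokenizer.eos_token_id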
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare the values at the two indices and swap them if they violate the
    given direction (1 for ascending, 0 for descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of the given length starting at
    index `low` into the given direction."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at index `low`: first build a bitonic
    sequence from two oppositely sorted halves, then merge it."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ') | 11 | 1 |
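# Worked example (added, not part of the original file): bitonic sort assumes the
# input length is a power of two.
#   arr = [3, 1, 4, 1, 5, 9, 2, 6]
#   bitonic_sort(arr, 0, len(arr), 1)   ->  [1, 1, 2, 3, 4, 5, 6, 9]
# Each merge over a block of length n performs n/2 compare-and-swaps per level,
# giving O(n log^2 n) comparisons overall.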
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
def lowerCamelCase_ ( self : str,__A : Tuple,__A : str,__A : Union[str, Any],__A : Any,__A : Union[str, Any]=None,**__A : Union[str, Any] ):
_lowerCamelCase : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(__A,__A )
_lowerCamelCase : Optional[int] = TFVisionTextDualEncoderModel(__A )
_lowerCamelCase : Union[str, Any] = model(input_ids=__A,pixel_values=__A,attention_mask=__A )
self.assertEqual(output["text_embeds"].shape,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape,(pixel_values.shape[0], config.projection_dim) )
def lowerCamelCase_ ( self : Tuple,__A : List[Any],__A : Tuple,__A : List[Any],__A : List[str],__A : Union[str, Any]=None,**__A : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = self.get_vision_text_model(__A,__A )
_lowerCamelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=__A,text_model=__A )
_lowerCamelCase : Any = model(input_ids=__A,pixel_values=__A,attention_mask=__A )
self.assertEqual(output["text_embeds"].shape,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape,(pixel_values.shape[0], model.config.projection_dim) )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int],__A : Dict,__A : Optional[Any],__A : int,__A : Optional[int]=None,**__A : int ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.get_vision_text_model(__A,__A )
_lowerCamelCase : Dict = {"vision_model": vision_model, "text_model": text_model}
_lowerCamelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__A )
_lowerCamelCase : List[str] = model(input_ids=__A,pixel_values=__A,attention_mask=__A )
self.assertEqual(output["text_embeds"].shape,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape,(pixel_values.shape[0], model.config.projection_dim) )
def lowerCamelCase_ ( self : List[Any],__A : List[str],__A : List[Any],__A : Optional[int],__A : Dict,__A : Optional[int]=None,**__A : Tuple ):
_lowerCamelCase , _lowerCamelCase : List[str] = self.get_vision_text_model(__A,__A )
_lowerCamelCase : List[Any] = TFVisionTextDualEncoderModel(vision_model=__A,text_model=__A )
_lowerCamelCase : Optional[int] = model(input_ids=__A,pixel_values=__A,attention_mask=__A )
_lowerCamelCase : List[str] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
_lowerCamelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(__A )
_lowerCamelCase : Any = model(input_ids=__A,pixel_values=__A,attention_mask=__A )
_lowerCamelCase : Optional[int] = after_output[0].numpy()
_lowerCamelCase : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A,1e-5 )
def lowerCamelCase_ ( self : Tuple,__A : Optional[Any],__A : List[str],__A : str,__A : Dict,__A : int=None,**__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : int = self.get_vision_text_model(__A,__A )
_lowerCamelCase : Any = TFVisionTextDualEncoderModel(vision_model=__A,text_model=__A )
_lowerCamelCase : str = model(
input_ids=__A,pixel_values=__A,attention_mask=__A,output_attentions=__A )
_lowerCamelCase : Any = output.vision_model_output.attentions
self.assertEqual(len(__A ),vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:],(vision_config.num_attention_heads, seq_len, seq_len) )
_lowerCamelCase : List[Any] = output.text_model_output.attentions
self.assertEqual(len(__A ),text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:],(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),)
def lowerCamelCase_ ( self : int,__A : np.ndarray,__A : np.ndarray,__A : float ):
_lowerCamelCase : List[Any] = np.abs((a - b) ).max()
self.assertLessEqual(__A,__A,f'Difference between torch and flax is {diff} (>= {tol}).' )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__A )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__A )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
self.check_save_load(**__A )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__A )
@slow
def lowerCamelCase_ ( self : int ):
_lowerCamelCase , _lowerCamelCase : Any = self.get_pretrained_model_and_inputs()
_lowerCamelCase : Tuple = model_a(**__A )
_lowerCamelCase : int = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__A )
_lowerCamelCase : int = TFVisionTextDualEncoderModel.from_pretrained(__A )
_lowerCamelCase : List[str] = model_a(**__A )
_lowerCamelCase : Any = after_outputs[0].numpy()
_lowerCamelCase : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A,1e-5 )
@require_tf
class UpperCAmelCase__ ( A , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCAmelCase__ ( A , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCAmelCase__ ( A , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3)) | 11 |
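# A standalone sketch of the save/reload round-trip exercised by check_save_load
# and the slow test above (assumption: works for any Keras-style HF model; the
# helper name and its arguments are illustrative, not part of the test suite).
import tempfile

import numpy as np

def assert_save_load_equivalent(model_cls, model, inputs, tol=1e-5):
    out_before = model(**inputs)[0].numpy()
    with tempfile.TemporaryDirectory() as tmp_dirname:
        model.save_pretrained(tmp_dirname)
        reloaded = model_cls.from_pretrained(tmp_dirname)
        out_after = reloaded(**inputs)[0].numpy()
    max_diff = np.amax(np.abs(out_after - out_before))
    assert max_diff <= tol, f"max abs diff {max_diff} exceeds {tol}"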
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """A partition count is perfect when log2(sqrt(4*n + 1)/2 + 1/2) is an integer."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(integer)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
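# Worked illustration: check_partition_perfect(p) passes exactly when p is of
# the form 2**k * (2**k - 1), since sqrt(4*p + 1)/2 + 1/2 then collapses to 2**k.
print([p for p in range(1, 250) if check_partition_perfect(p)])  # [2, 12, 56, 240]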
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm, eager version: returns the MST as a list of edges."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm, heap-based version: yields the MST edges lazily."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
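# Usage sketch for the MST helpers above (edge weights are hypothetical):
# vertices get 0-based ids, while connect() takes 1-based endpoints as in
# the original test vector.
graph = [Vertex(n) for n in range(5)]
connect(graph, 1, 2, 4)
connect(graph, 1, 3, 1)
connect(graph, 2, 3, 2)
connect(graph, 3, 4, 7)
connect(graph, 4, 5, 3)
print(prim(graph, graph[0]))             # expected: [(2, 3), (3, 1), (4, 3), (5, 4)]
print(list(prim_heap(graph, graph[0])))  # the lazy variant yields the same edges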
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs) | 11 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache | 11 |
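# Usage sketch (assumption: the class above is transformers' BertGenerationConfig,
# as its model_type and defaults indicate).
from transformers import BertGenerationConfig

config = BertGenerationConfig(hidden_size=1024, num_attention_heads=16)
print(config.vocab_size, config.num_hidden_layers)  # 50358 24 by default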
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 1 |
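# Usage sketch for the fast tokenizer above (downloads "facebook/blenderbot-3B";
# behaviour follows the public BlenderbotTokenizerFast API).
from transformers import BlenderbotTokenizerFast

tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tokenizer(" Hello, how are you?").input_ids  # EOS is appended automatically
print(tokenizer.decode(ids))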
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort in place with alternating forward and backward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Any = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ : List[Any] = [int(item) for item in user_input.split(',')]
print(f'''{cocktail_shaker_sort(unsorted) = }''') | 11 |
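# Worked example (hypothetical input): each outer pass bubbles the maximum to
# the right and the minimum to the left, and the `swapped` flag stops early
# once a full pass makes no exchange.
print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]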
'''simple docstring'''
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # Bolzano's theorem: a root is bracketed only if the signs at a and b differ
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
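# Worked check: equation(x) = 10 - x*x vanishes at +/-sqrt(10) ~ 3.1623, and
# the loop halves [a, b] until it is narrower than 0.01, so the returned
# midpoint lies within 0.01 of the true root.
import math

assert abs(bisection(0, 6) - math.sqrt(10)) < 0.01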
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'timm_backbone'
def __init__( self : Tuple,__A : int=None,__A : int=3,__A : List[Any]=True,__A : Optional[int]=True,__A : List[Any]=None,**__A : int,):
super().__init__(**__A )
_lowerCamelCase : Optional[int] = backbone
_lowerCamelCase : Any = num_channels
_lowerCamelCase : Tuple = features_only
_lowerCamelCase : Optional[Any] = use_pretrained_backbone
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : List[str] = out_indices if out_indices is not None else (-1,) | 11 |
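# Usage sketch (assumption: the class above is transformers' TimmBackboneConfig;
# "resnet50" is an illustrative timm checkpoint name).
from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", num_channels=3)
print(config.features_only, config.out_indices)  # True (-1,) by default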
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 1 |
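# Isolated sketch of the whitespace-insensitive exact deduplication used above
# (readable names assumed; mirrors PATTERN + the hashing and uniqueness checks).
import hashlib
import re

whitespace = re.compile(r"\s+")

def content_hash(text: str) -> str:
    return hashlib.md5(whitespace.sub("", text).encode("utf-8")).hexdigest()

seen = {content_hash("def f():\n    return 1")}
print(content_hash("def f():  return 1") in seen)  # True: whitespace is ignored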
'''simple docstring'''
import requests
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Tuple = {"Content-Type": "application/json"}
_lowerCamelCase : Optional[Any] = requests.post(_lowerCAmelCase , json={"text": message_body} , headers=_lowerCAmelCase )
if response.status_code != 200:
_lowerCamelCase : str = (
"Request to slack returned an error "
F'{response.status_code}, the response is:\n{response.text}'
)
raise ValueError(_lowerCAmelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>') | 11 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
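# Usage sketch (assumptions: the classes above are transformers' legacy
# SquadDataTrainingArguments / SquadDataset, and "./squad" holds the SQuAD
# json files).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")
print(len(train_dataset), train_dataset[0]["input_ids"].shape)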
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline | 11 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |
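# Usage sketch (downloads "albert-base-v2"; behaviour follows the public
# AlbertTokenizerFast API, which the class above mirrors).
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoded = tokenizer("First segment.", "Second segment.")
print(encoded.token_type_ids)  # 0s for the first segment, 1s for the second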
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCAmelCase_ : Any = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
        # fmt: off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        _lowerCamelCase : int = re.compile(
            f'[{"".join(map(chr,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 1 |
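# Isolated sketch of the text preprocessing above (readable names assumed):
# drop non-printing characters such as soft hyphens, then NFC-normalize.
import re
import unicodedata

non_printing = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)

def preprocess_text(text: str) -> str:
    return unicodedata.normalize("NFC", non_printing.sub("", text))

print(preprocess_text("co\u00adoperate"))  # "cooperate" (soft hyphen removed)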
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCAmelCase__ ( A ):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
def lowerCamelCase_ ( self : Any ):
super().setup()
_lowerCamelCase : str = nn.Dense(5,dtype=self.dtype )
def __call__( self : Optional[int],*__A : Optional[int],**__A : Union[str, Any] ):
_lowerCamelCase : Optional[int] = super().__call__(*__A,**__A )
_lowerCamelCase : int = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = FlaxBigBirdForNaturalQuestionsModule
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ):
"""simple docstring"""
def cross_entropy(_lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Tuple=None ):
_lowerCamelCase : Optional[int] = logits.shape[-1]
_lowerCamelCase : Optional[Any] = (labels[..., None] == jnp.arange(_lowerCAmelCase )[None]).astype("f4" )
_lowerCamelCase : str = jax.nn.log_softmax(_lowerCAmelCase , axis=-1 )
_lowerCamelCase : int = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_lowerCamelCase : List[Any] = reduction(_lowerCAmelCase )
return loss
_lowerCamelCase : int = partial(_lowerCAmelCase , reduction=jnp.mean )
_lowerCamelCase : List[Any] = cross_entropy(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = cross_entropy(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Optional[int] = cross_entropy(_lowerCAmelCase , _lowerCAmelCase )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = "google/bigbird-roberta-base"
lowerCAmelCase_ = 3000
lowerCAmelCase_ = 10500
lowerCAmelCase_ = 128
lowerCAmelCase_ = 3
lowerCAmelCase_ = 1
lowerCAmelCase_ = 5
# tx_args
lowerCAmelCase_ = 3E-5
lowerCAmelCase_ = 0.0
lowerCAmelCase_ = 20000
lowerCAmelCase_ = 0.0_0_9_5
lowerCAmelCase_ = "bigbird-roberta-natural-questions"
lowerCAmelCase_ = "training-expt"
lowerCAmelCase_ = "data/nq-training.jsonl"
lowerCAmelCase_ = "data/nq-validation.jsonl"
def lowerCamelCase_ ( self : Optional[int] ):
os.makedirs(self.base_dir,exist_ok=__A )
_lowerCamelCase : Union[str, Any] = os.path.join(self.base_dir,self.save_dir )
_lowerCamelCase : Any = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 4096 # no dynamic padding on TPUs
def __call__( self : Union[str, Any],__A : List[str] ):
_lowerCamelCase : Dict = self.collate_fn(__A )
_lowerCamelCase : int = jax.tree_util.tree_map(__A,__A )
return batch
def lowerCamelCase_ ( self : List[Any],__A : Optional[Any] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.fetch_inputs(features["input_ids"] )
_lowerCamelCase : Dict = {
"input_ids": jnp.array(__A,dtype=jnp.intaa ),
"attention_mask": jnp.array(__A,dtype=jnp.intaa ),
"start_labels": jnp.array(features["start_token"],dtype=jnp.intaa ),
"end_labels": jnp.array(features["end_token"],dtype=jnp.intaa ),
"pooled_labels": jnp.array(features["category"],dtype=jnp.intaa ),
}
return batch
def lowerCamelCase_ ( self : Union[str, Any],__A : list ):
_lowerCamelCase : str = [self._fetch_inputs(__A ) for ids in input_ids]
return zip(*__A )
def lowerCamelCase_ ( self : Tuple,__A : list ):
_lowerCamelCase : str = [1 for _ in range(len(__A ) )]
while len(__A ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=None ):
"""simple docstring"""
if seed is not None:
_lowerCamelCase : Union[str, Any] = dataset.shuffle(seed=_lowerCAmelCase )
for i in range(len(_lowerCAmelCase ) // batch_size ):
_lowerCamelCase : Any = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_lowerCAmelCase )
@partial(jax.pmap , axis_name="batch" )
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
"""simple docstring"""
def loss_fn(_lowerCAmelCase : str ):
_lowerCamelCase : List[Any] = model_inputs.pop("start_labels" )
_lowerCamelCase : Union[str, Any] = model_inputs.pop("end_labels" )
_lowerCamelCase : Optional[int] = model_inputs.pop("pooled_labels" )
_lowerCamelCase : List[str] = state.apply_fn(**_lowerCAmelCase , params=_lowerCAmelCase , dropout_rng=_lowerCAmelCase , train=_lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = outputs
return state.loss_fn(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
_lowerCamelCase , _lowerCamelCase : List[Any] = jax.random.split(_lowerCAmelCase )
_lowerCamelCase : Tuple = jax.value_and_grad(_lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : int = grad_fn(state.params )
_lowerCamelCase : Optional[Any] = jax.lax.pmean({"loss": loss} , axis_name="batch" )
_lowerCamelCase : Union[str, Any] = jax.lax.pmean(_lowerCAmelCase , "batch" )
_lowerCamelCase : Union[str, Any] = state.apply_gradients(grads=_lowerCAmelCase )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def A_ ( _lowerCAmelCase : int , **_lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = model_inputs.pop("start_labels" )
_lowerCamelCase : Any = model_inputs.pop("end_labels" )
_lowerCamelCase : Any = model_inputs.pop("pooled_labels" )
_lowerCamelCase : Optional[int] = state.apply_fn(**_lowerCAmelCase , params=state.params , train=_lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = outputs
_lowerCamelCase : List[str] = state.loss_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = jax.lax.pmean({"loss": loss} , axis_name="batch" )
return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
def lowerCamelCase_ ( self : Tuple,__A : List[Any],__A : int,__A : List[str],__A : Tuple=None ):
_lowerCamelCase : Optional[Any] = model.params
_lowerCamelCase : List[Any] = TrainState.create(
apply_fn=model.__call__,params=__A,tx=__A,loss_fn=__A,)
if ckpt_dir is not None:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = restore_checkpoint(__A,__A )
_lowerCamelCase : List[str] = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
_lowerCamelCase , _lowerCamelCase : Any = build_tx(**__A )
_lowerCamelCase : int = train_state.TrainState(
step=__A,apply_fn=model.__call__,params=__A,tx=__A,opt_state=__A,)
_lowerCamelCase : int = args
_lowerCamelCase : str = data_collator
_lowerCamelCase : Tuple = lr
_lowerCamelCase : Tuple = params
_lowerCamelCase : Tuple = jax_utils.replicate(__A )
return state
def lowerCamelCase_ ( self : List[Any],__A : int,__A : Dict,__A : Optional[Any] ):
_lowerCamelCase : str = self.args
_lowerCamelCase : Union[str, Any] = len(__A ) // args.batch_size
_lowerCamelCase : Dict = jax.random.PRNGKey(0 )
_lowerCamelCase : Optional[int] = jax.random.split(__A,jax.device_count() )
for epoch in range(args.max_epochs ):
        _lowerCamelCase : List[str] = jnp.array(0, dtype=jnp.float32)
_lowerCamelCase : Any = get_batched_dataset(__A,args.batch_size,seed=__A )
_lowerCamelCase : Union[str, Any] = 0
for batch in tqdm(__A,total=__A,desc=f'Running EPOCH-{epoch}' ):
_lowerCamelCase : Optional[Any] = self.data_collator(__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.train_step_fn(__A,__A,**__A )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
_lowerCamelCase : Dict = jax_utils.unreplicate(state.step )
_lowerCamelCase : Optional[Any] = running_loss.item() / i
_lowerCamelCase : Tuple = self.scheduler_fn(state_step - 1 )
_lowerCamelCase : Any = self.evaluate(__A,__A )
_lowerCamelCase : str = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(__A ) )
self.logger.log(__A,commit=__A )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}',state=__A )
def lowerCamelCase_ ( self : Union[str, Any],__A : int,__A : Tuple ):
_lowerCamelCase : List[Any] = get_batched_dataset(__A,self.args.batch_size )
_lowerCamelCase : str = len(__A ) // self.args.batch_size
        _lowerCamelCase : Optional[Any] = jnp.array(0, dtype=jnp.float32)
_lowerCamelCase : Dict = 0
for batch in tqdm(__A,total=__A,desc="Evaluating ... " ):
_lowerCamelCase : str = self.data_collator(__A )
_lowerCamelCase : Dict = self.val_step_fn(__A,**__A )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def lowerCamelCase_ ( self : List[str],__A : Optional[Any],__A : Union[str, Any] ):
_lowerCamelCase : Any = jax_utils.unreplicate(__A )
print(f'SAVING CHECKPOINT IN {save_dir}',end=" ... " )
self.model_save_fn(__A,params=state.params )
with open(os.path.join(__A,"opt_state.msgpack" ),"wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args,os.path.join(__A,"args.joblib" ) )
joblib.dump(self.data_collator,os.path.join(__A,"data_collator.joblib" ) )
with open(os.path.join(__A,"training_state.json" ),"w" ) as f:
json.dump({"step": state.step.item()},__A )
print("DONE" )
def restore_checkpoint(save_dir, state):
    """simple docstring"""
    print(f'RESTORING CHECKPOINT FROM {save_dir}', end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """simple docstring"""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1E-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """simple docstring"""
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)
    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
return tx, lr | 11 |
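# A minimal runnable sketch of the optimizer recipe above: a linear warmup joined
# to a linear decay via optax.join_schedules, driving optax.adamw with a mask that
# exempts biases and LayerNorm scales from weight decay. The hyperparameter values
# and the name `build_tx_sketch` are illustrative assumptions, not the script's
# actual settings.
import optax
from flax import traverse_util

def build_tx_sketch(lr=1e-4, init_lr=0.0, warmup_steps=100, num_train_steps=1_000, weight_decay=1e-2):
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=num_train_steps - warmup_steps)
    lr_schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])

    def weight_decay_mask(params):
        flat = traverse_util.flatten_dict(params)
        # flattened keys are tuples of path components, e.g. ("encoder", "LayerNorm", "scale")
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in flat}
        return traverse_util.unflatten_dict(mask)

    tx = optax.adamw(learning_rate=lr_schedule, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr_schedule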
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 11 | 1 |
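# For contrast with the concatenate-and-resort merge above (O((m + n) log(m + n))),
# a plain two-pointer merge of already-sorted sequences runs in O(m + n). A minimal
# sketch on lists:
def merge_sorted(a: list[int], b: list[int]) -> list[int]:
    out: list[int] = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] <= b[j]:
            out.append(a[i])
            i += 1
        else:
            out.append(b[j])
            j += 1
    out.extend(a[i:])
    out.extend(b[j:])
    return out

assert merge_sorted([-11, 1, 3], [0, 2, 4]) == [-11, 0, 1, 2, 3, 4]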
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase__ :
lowerCAmelCase_ = PegasusConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = 'gelu'
def __init__( self : str,__A : Tuple,__A : Optional[Any]=1_3,__A : Tuple=7,__A : List[str]=True,__A : Any=False,__A : Tuple=9_9,__A : List[Any]=3_2,__A : Union[str, Any]=2,__A : Dict=4,__A : int=3_7,__A : Optional[Any]=0.1,__A : Optional[Any]=0.1,__A : Optional[int]=4_0,__A : List[str]=2,__A : List[str]=1,__A : List[Any]=0,):
_lowerCamelCase : Any = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Union[str, Any] = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Any = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : List[str] = eos_token_id
_lowerCamelCase : List[str] = pad_token_id
_lowerCamelCase : Union[str, Any] = bos_token_id
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length - 1],self.vocab_size )
_lowerCamelCase : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ),1 )
_lowerCamelCase : Any = tf.concat([input_ids, eos_tensor],axis=1 )
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,eos_token_ids=[2],bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,decoder_start_token_id=self.pad_token_id,**self.config_updates,)
_lowerCamelCase : str = prepare_pegasus_inputs_dict(__A,__A,__A )
return config, inputs_dict
def lowerCamelCase_ ( self : Optional[Any],__A : int,__A : int ):
_lowerCamelCase : Any = TFPegasusModel(config=__A ).get_decoder()
_lowerCamelCase : List[str] = inputs_dict["input_ids"]
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Any = inputs_dict["attention_mask"][:1, :]
_lowerCamelCase : Tuple = inputs_dict["head_mask"]
_lowerCamelCase : Optional[int] = 1
# first forward pass
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A,head_mask=__A,use_cache=__A )
_lowerCamelCase , _lowerCamelCase : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCamelCase : int = ids_tensor((self.batch_size, 3),config.vocab_size )
        _lowerCamelCase : int = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
_lowerCamelCase : List[str] = tf.concat([input_ids, next_tokens],axis=-1 )
_lowerCamelCase : List[str] = tf.concat([attention_mask, next_attn_mask],axis=-1 )
_lowerCamelCase : Dict = model(__A,attention_mask=__A )[0]
_lowerCamelCase : List[str] = model(__A,attention_mask=__A,past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1],output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Optional[int] = int(ids_tensor((1,),output_from_past.shape[-1] ) )
_lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A,__A,rtol=1e-3 )
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[str]=None , ):
"""simple docstring"""
if attention_mask is None:
        _lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(_lowerCAmelCase , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        _lowerCamelCase : Union[str, Any] = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_lowerCamelCase : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowerCamelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowerCAmelCase_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ = (
{
'conversational': TFPegasusForConditionalGeneration,
'feature-extraction': TFPegasusModel,
'summarization': TFPegasusForConditionalGeneration,
'text2text-generation': TFPegasusForConditionalGeneration,
'translation': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = TFPegasusModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self,config_class=__A )
def lowerCamelCase_ ( self : int ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
lowerCAmelCase_ = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowerCAmelCase_ = 'google/pegasus-xsum'
@cached_property
def lowerCamelCase_ ( self : Union[str, Any] ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def lowerCamelCase_ ( self : List[Any],**__A : Dict ):
_lowerCamelCase : Union[str, Any] = self.translate_src_text(**__A )
assert self.expected_text == generated_words
def lowerCamelCase_ ( self : str,**__A : Dict ):
_lowerCamelCase : Dict = self.tokenizer(self.src_text,**__A,padding=__A,return_tensors="tf" )
_lowerCamelCase : Any = self.model.generate(
model_inputs.input_ids,attention_mask=model_inputs.attention_mask,num_beams=2,use_cache=__A,)
_lowerCamelCase : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy(),skip_special_tokens=__A )
return generated_words
@slow
def lowerCamelCase_ ( self : int ):
self._assert_generated_batch_equal_expected() | 11 |
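# The past-key-values test above follows one pattern: run the decoder over the full
# sequence, run it again over only the new tokens with the cache, and check the new
# tokens' logits agree within tolerance. A framework-agnostic sketch of the final
# comparison (function name and shapes are illustrative):
import numpy as np

def check_cache_equivalence(full_logits: np.ndarray, cached_logits: np.ndarray, n_new: int, atol: float = 1e-3) -> None:
    # full_logits: (batch, seq_len, vocab); cached_logits: (batch, n_new, vocab)
    assert cached_logits.shape[1] == n_new
    np.testing.assert_allclose(full_logits[:, -n_new:, :], cached_logits, atol=atol)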
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 | 1 |
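# The _LazyModule registration above defers the tokenizer import until first use.
# Plain Python can get the same effect with PEP 562's module-level __getattr__; a
# sketch of what a package __init__.py could contain (the single mapping entry is
# taken from the row above, everything else is generic):
import importlib

_LAZY_ATTRS = {"Wav2Vec2PhonemeCTCTokenizer": ".tokenization_wav2vec2_phoneme"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")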
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
UpperCAmelCase_ : int = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
UpperCAmelCase_ : Dict = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
UpperCAmelCase_ : Optional[int] = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
UpperCAmelCase_ : List[Any] = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
UpperCAmelCase_ : Optional[Any] = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
UpperCAmelCase_ : Optional[int] = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def A_ ( ):
"""simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def A_ ( _lowerCAmelCase : int = 100 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(_lowerCAmelCase ))
@pytest.mark.parametrize("hand, expected" , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
assert PokerHand(_lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize("hand, expected" , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str] ):
"""simple docstring"""
assert PokerHand(_lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values" , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : str = PokerHand(_lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected" , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int ):
"""simple docstring"""
assert PokerHand(_lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected" , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
assert PokerHand(_lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize("hand, other, expected" , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ):
"""simple docstring"""
assert PokerHand(_lowerCAmelCase ).compare_with(PokerHand(_lowerCAmelCase ) ) == expected
@pytest.mark.parametrize("hand, other, expected" , generate_random_hands() )
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ):
"""simple docstring"""
assert PokerHand(_lowerCAmelCase ).compare_with(PokerHand(_lowerCAmelCase ) ) == expected
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Tuple = [PokerHand(_lowerCAmelCase ) for hand in SORTED_HANDS]
_lowerCamelCase : int = poker_hands.copy()
shuffle(_lowerCAmelCase )
_lowerCamelCase : List[str] = chain(sorted(_lowerCAmelCase ) )
for index, hand in enumerate(_lowerCAmelCase ):
assert hand == poker_hands[index]
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = [PokerHand("2D AC 3H 4H 5S" ), PokerHand("2S 3H 4H 5S 6C" )]
pokerhands.sort(reverse=_lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[Any] = PokerHand("2C 4S AS 3D 5C" )
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Dict = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Dict = 0
_lowerCamelCase : Union[str, Any] = os.path.abspath(os.path.dirname(_lowerCAmelCase ) )
_lowerCamelCase : Dict = os.path.join(_lowerCAmelCase , "poker_hands.txt" )
with open(_lowerCAmelCase ) as file_hand:
for line in file_hand:
_lowerCamelCase : Tuple = line[:14].strip()
_lowerCamelCase : List[Any] = line[15:].strip()
_lowerCamelCase , _lowerCamelCase : str = PokerHand(_lowerCAmelCase ), PokerHand(_lowerCAmelCase )
_lowerCamelCase : int = player.compare_with(_lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 376 | 11 |
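# Everything above is driven by pytest.mark.parametrize, which fans a single test
# function out over many (input, expected) tuples. A standalone sketch of the
# pattern on a toy property of the same card strings:
import pytest

@pytest.mark.parametrize(
    "hand, expected_suits",
    [("2H 3H 4H 5H 6H", {"H"}), ("AS AH 2H AD AC", {"S", "H", "D", "C"})],
)
def test_hand_suits(hand: str, expected_suits: set[str]) -> None:
    assert {card[1] for card in hand.split()} == expected_suits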
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) ) | 11 | 1 |
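# The slow test above pins a small slice of the model output against golden values
# with a tolerance — a cheap regression guard against numerical drift. A generic
# sketch of that assertion style with a stand-in forward function:
import numpy as np

def forward(x: np.ndarray) -> np.ndarray:  # stand-in for a real model call
    return np.tanh(x)

output = forward(np.array([[0.0, 0.5, 1.0]]))
expected_slice = np.array([[0.0, 0.4621172, 0.7615942]])
assert output.shape == (1, 3)
assert np.allclose(output, expected_slice, atol=1e-4)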
'''simple docstring'''
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    """simple docstring"""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    """simple docstring"""
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    """simple docstring"""
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    """simple docstring"""
    assert test_trie()


def main() -> None:
    """simple docstring"""
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main() | 11 |
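# Quick usage sketch of the trie above: delete() only prunes nodes bottom-up while
# they have no remaining children, so words sharing a prefix survive.
t = TrieNode()
t.insert_many(["car", "cart", "cat"])
assert t.find("car") and t.find("cart") and t.find("cat")
t.delete("car")
assert not t.find("car")
assert t.find("cart") and t.find("cat")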
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    _lowerCamelCase : List[str] = T5Tokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 | 1 |
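# The heart of the conversion script is a greedy packing rule: keep adding tensors
# to the current shard, and cut a new shard as soon as the next tensor would push
# it past the byte budget. Minimal sketch over (name, nbytes) pairs; all names and
# sizes below are illustrative:
def shard_by_size(weights: dict[str, int], max_bytes: int) -> list[list[str]]:
    shards: list[list[str]] = []
    current: list[str] = []
    current_size = 0
    for name, nbytes in weights.items():
        if current and current_size + nbytes > max_bytes:
            shards.append(current)
            current, current_size = [], 0
        current.append(name)
        current_size += nbytes
    if current:
        shards.append(current)
    return shards

assert shard_by_size({"a": 6, "b": 6, "c": 4}, max_bytes=10) == [["a"], ["b", "c"]]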
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def A_ ( _lowerCAmelCase : int = 8 ):
"""simple docstring"""
_lowerCamelCase : str = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
i -= len(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = i // 3
_lowerCamelCase : str = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_lowerCamelCase : Optional[Any] = (
chars_incl
+ random(_lowerCAmelCase , quotient + remainder )
+ random(_lowerCAmelCase , _lowerCAmelCase )
+ random(_lowerCAmelCase , _lowerCAmelCase )
)
_lowerCamelCase : Optional[Any] = list(_lowerCAmelCase )
shuffle(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
# random is a generalised function for letters, characters and numbers
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int ):
"""simple docstring"""
return "".join(secrets.choice(_lowerCAmelCase ) for _ in range(_lowerCAmelCase ) )
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple ):
"""simple docstring"""
pass # Put your code here...
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
pass # Put your code here...
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
"""simple docstring"""
pass # Put your code here...
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : int = 8 ):
"""simple docstring"""
if len(_lowerCAmelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
_lowerCamelCase : List[Any] = any(char in ascii_uppercase for char in password )
_lowerCamelCase : str = any(char in ascii_lowercase for char in password )
_lowerCamelCase : Any = any(char in digits for char in password )
_lowerCamelCase : Tuple = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : str = int(input("Please indicate the max length of your password: " ).strip() )
_lowerCamelCase : Any = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCAmelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCAmelCase , _lowerCAmelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main() | 11 |
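# The generator above draws characters with the secrets module (a CSPRNG) rather
# than random. A compact variant that also guarantees at least one character from
# each class; the name `strong_password` and the defaults are assumptions for
# illustration:
import secrets
from random import SystemRandom
from string import ascii_lowercase, ascii_uppercase, digits, punctuation

def strong_password(length: int = 12) -> str:
    pools = [ascii_lowercase, ascii_uppercase, digits, punctuation]
    chars = [secrets.choice(pool) for pool in pools]  # one from each class
    alphabet = "".join(pools)
    chars += [secrets.choice(alphabet) for _ in range(length - len(pools))]
    SystemRandom().shuffle(chars)  # shuffle with a CSPRNG, not the default PRNG
    return "".join(chars)

assert len(strong_password(16)) == 16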
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
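# The counting above rests on the unfolding argument: for an a x b x c cuboid with
# a <= b <= c, the shortest surface path has length sqrt((a + b)^2 + c^2), so for
# each longest side m and each s = a + b it suffices to count the splits of s with
# 1 <= a <= b <= m. Sketch of that per-(s, m) count:
def count_splits(s: int, m: int) -> int:
    # pairs (a, b) with a + b == s and 1 <= a <= b <= m
    return min(m, s // 2) - max(1, s - m) + 1

assert count_splits(4, 3) == 2  # (1, 3) and (2, 2)
assert count_splits(6, 3) == 1  # (3, 3) only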
'''simple docstring'''
class UpperCAmelCase__ :
def __init__( self : int,__A : list[int] ):
_lowerCamelCase : Any = len(__A )
_lowerCamelCase : List[Any] = [0] * len_array
if len_array > 0:
_lowerCamelCase : Tuple = array[0]
for i in range(1,__A ):
_lowerCamelCase : Any = self.prefix_sum[i - 1] + array[i]
def lowerCamelCase_ ( self : str,__A : int,__A : int ):
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCamelCase_ ( self : Union[str, Any],__A : int ):
_lowerCamelCase : List[Any] = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__A )
return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
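# The range query above is the standard prefix-sum identity
# sum(a[l..r]) = P[r] - P[l-1], and contains_sum uses the hash-set trick: a
# subarray sums to t iff some prefix differs from an earlier prefix (or zero)
# by exactly t. Tiny standalone sketch of the range query:
def range_sum(prefix: list[int], left: int, right: int) -> int:
    return prefix[right] - (prefix[left - 1] if left > 0 else 0)

values = [1, 2, 3, 4]
prefix = [1, 3, 6, 10]
assert range_sum(prefix, 1, 3) == 9  # 2 + 3 + 4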
'''simple docstring'''
def A_ ( num : int ):
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 | 1 |
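# Sanity checks for the conversion above (assuming the function keeps the name A_
# from this row); the stdlib bin() builtin uses the same "0b"/"-0b" prefixes:
assert A_(0) == "0b0"
assert A_(10) == "0b1010" == bin(10)
assert A_(-5) == "-0b101" == bin(-5)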
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path) | 11 |
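# The script above re-wires submodules from a text-to-image pipeline into an
# image-variation pipeline — components are shared by reference, not copied. A
# generic sketch of that composition pattern (both classes are illustrative):
class TextPipeline:
    def __init__(self, text_encoder, scheduler):
        self.text_encoder, self.scheduler = text_encoder, scheduler

class VariationPipeline:
    def __init__(self, text_encoder, scheduler, image_encoder):
        self.text_encoder, self.scheduler, self.image_encoder = text_encoder, scheduler, image_encoder

src = TextPipeline(text_encoder=object(), scheduler=object())
dst = VariationPipeline(text_encoder=src.text_encoder, scheduler=src.scheduler, image_encoder=object())
assert dst.text_encoder is src.text_encoder  # shared, not duplicated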
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=__A ) | 11 | 1 |
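# The denoising loop above batches an unconditional and a conditional forward pass
# together and then applies classifier-free guidance:
#     pred = uncond + scale * (cond - uncond)
# A tensor-level sketch of that combine step (shapes are illustrative):
import torch

def cfg_combine(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks [unconditional, conditional] along the batch dimension
    uncond, cond = noise_pred.chunk(2)
    return uncond + guidance_scale * (cond - uncond)

x = torch.zeros(1, 4)
y = torch.ones(1, 4)
assert torch.equal(cfg_combine(torch.cat([x, y]), 3.0), 3.0 * y)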
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = ['image_processor']
lowerCAmelCase_ = 'SamImageProcessor'
def __init__( self : Any,__A : Optional[int] ):
super().__init__(__A )
_lowerCamelCase : Union[str, Any] = self.image_processor
_lowerCamelCase : Optional[int] = -1_0
_lowerCamelCase : List[str] = self.image_processor.size["longest_edge"]
def __call__( self : Any,__A : Tuple=None,__A : Any=None,__A : Optional[Any]=None,__A : Dict=None,__A : Optional[Union[str, TensorType]] = None,**__A : str,):
_lowerCamelCase : str = self.image_processor(
__A,return_tensors=__A,**__A,)
        # pop arguments that are not used in the forward pass but are needed nevertheless
_lowerCamelCase : List[Any] = encoding_image_processor["original_sizes"]
if hasattr(__A,"numpy" ): # Checks if Torch or TF tensor
_lowerCamelCase : Dict = original_sizes.numpy()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self._check_and_preprocess_points(
input_points=__A,input_labels=__A,input_boxes=__A,)
_lowerCamelCase : Union[str, Any] = self._normalize_and_convert(
__A,__A,input_points=__A,input_labels=__A,input_boxes=__A,return_tensors=__A,)
return encoding_image_processor
    def _normalize_and_convert ( self : List[str],encoding_image_processor : List[str],original_sizes : Union[str, Any],input_points : List[str]=None,input_labels : int=None,input_boxes : Any=None,return_tensors : int="pt",):
        if input_points is not None:
            if len(original_sizes ) != len(input_points ):
                input_points = [
                    self._normalize_coordinates(self.target_size,point,original_sizes[0] ) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size,point,original_size )
                    for point, original_size in zip(input_points,original_sizes )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    input_points , input_labels = self._pad_points_and_labels(input_points,input_labels )
            input_points = np.array(input_points )
        if input_labels is not None:
            input_labels = np.array(input_labels )
        if input_boxes is not None:
            if len(original_sizes ) != len(input_boxes ):
                input_boxes = [
                    self._normalize_coordinates(self.target_size,box,original_sizes[0],is_bounding_box=True )
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size,box,original_size,is_bounding_box=True )
                    for box, original_size in zip(input_boxes,original_sizes )
                ]
            input_boxes = np.array(input_boxes )
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes )
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes,1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points )
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points )
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points,1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels )
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels )
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels,1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor
    def _pad_points_and_labels ( self : List[str],input_points : Dict,input_labels : str ):
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value],axis=0 )
                input_labels[i] = np.append(input_labels[i],[self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates ( self : int,target_size : int,coords : np.ndarray,original_size : Union[str, Any],is_bounding_box : Optional[int]=False ):
        old_h , old_w = original_size
        new_h , new_w = self.image_processor._get_preprocess_shape(original_size,longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1,2,2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1,4 )
        return coords
    def _check_and_preprocess_points ( self : List[str],input_points : str=None,input_labels : Tuple=None,input_boxes : List[Any]=None,):
        if input_points is not None:
            if hasattr(input_points,"numpy" ): # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points,list ) or not isinstance(input_points[0],list ):
                raise ValueError("Input points must be a list of list of floating points." )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels,"numpy" ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels,list ) or not isinstance(input_labels[0],list ):
                raise ValueError("Input labels must be a list of lists of integers." )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes,"numpy" ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes,list )
                or not isinstance(input_boxes[0],list )
                or not isinstance(input_boxes[0][0],list )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
@property
    def model_input_names ( self : Optional[int] ):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks ( self : List[str],*args : int,**kwargs : Union[str, Any] ):
        return self.image_processor.post_process_masks(*args,**kwargs )
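# Hedged, standalone sketch of the point-rescaling idea behind
# `_normalize_coordinates` above: coordinates given in the original image frame
# are scaled into the longest-edge-resized frame. The resize rule is re-derived
# here rather than taken from the processor, so treat it as an approximation.
import numpy as np

def rescale_points(coords: np.ndarray, original_size: tuple, longest_edge: int = 1024) -> np.ndarray:
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    out = coords.astype(float).copy()
    out[..., 0] *= new_w / old_w  # x axis
    out[..., 1] *= new_h / old_h  # y axis
    return out

print(rescale_points(np.array([[500.0, 375.0]]), (750, 1000)))  # [[512. 384.]]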
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle ( data : list ):
    """simple docstring"""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 11 | 1 |
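# The loop above performs random pair swaps; the textbook Fisher-Yates variant,
# sketched here for comparison, walks the list backwards and swaps each slot
# with a random earlier index, which guarantees a uniform permutation.
def fisher_yates_textbook(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i
        data[i], data[j] = data[j], data[i]
    return data

print('Textbook FY', fisher_yates_textbook(list(range(8))))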
'''simple docstring'''
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester ( unittest.TestCase ):
    def __init__( self : Optional[Any],parent : List[str],batch_size : List[str]=7,num_channels : Union[str, Any]=3,image_size : Union[str, Any]=1_8,min_resolution : int=3_0,max_resolution : Dict=4_0_0,do_resize : Union[str, Any]=True,size : List[Any]=None,do_normalize : Any=True,image_mean : Any=[0.5, 0.5, 0.5],image_std : str=[0.5, 0.5, 0.5],):
        size = size if size is not None else {"height": 1_8, "width": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict ( self : List[Any] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
    def setUp ( self : Tuple ):
        self.image_processor_tester = DPTImageProcessingTester(self )
@property
    def image_processor_dict ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties ( self : Tuple ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing,"image_mean" ) )
        self.assertTrue(hasattr(image_processing,"image_std" ) )
        self.assertTrue(hasattr(image_processing,"do_normalize" ) )
        self.assertTrue(hasattr(image_processing,"do_resize" ) )
        self.assertTrue(hasattr(image_processing,"size" ) )
    def test_image_processor_from_dict_with_kwargs ( self : Tuple ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size,{"height": 1_8, "width": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict,size=4_2 )
        self.assertEqual(image_processor.size,{"height": 4_2, "width": 4_2} )
    def test_call_pil ( self : List[Any] ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester,equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image,Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),)
        # Test batched
        encoded_images = image_processing(image_inputs,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),)
    def test_call_numpy ( self : Dict ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester,equal_resolution=False,numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image,np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),)
        # Test batched
        encoded_images = image_processing(image_inputs,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),)
    def test_call_pytorch ( self : Union[str, Any] ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester,equal_resolution=False,torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image,torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0],return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),)
        # Test batched
        encoded_images = image_processing(image_inputs,return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),)
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
    def __init__( self : Union[str, Any],parent : Dict,batch_size : List[str]=1_3,seq_length : Any=7,is_training : str=True,use_attention_mask : Optional[int]=True,use_token_type_ids : Optional[Any]=True,use_labels : Any=True,vocab_size : List[str]=9_9,hidden_size : str=3_2,num_hidden_layers : List[str]=5,num_attention_heads : Optional[Any]=4,intermediate_size : Any=3_7,hidden_act : Optional[Any]="gelu",hidden_dropout_prob : List[Any]=0.1,attention_probs_dropout_prob : Any=0.1,max_position_embeddings : Dict=5_1_2,type_vocab_size : Tuple=1_6,type_sequence_label_size : Tuple=2,initializer_range : List[Any]=0.02,num_choices : Any=4,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self : int ):
        input_ids = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=False,initializer_range=self.initializer_range,)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder ( self : List[str] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp ( self : Optional[int] ):
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained ( self : Any ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base",from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
'''simple docstring'''
def split ( string : str , separator : str = " " ):
    """simple docstring"""
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod() | 11 |
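# Quick usage sketch for `split` above, including a custom separator:
print(split('apple#banana#cherry#orange', separator='#'))  # ['apple', 'banana', 'cherry', 'orange']
print(split('Hello there'))  # ['Hello', 'there']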
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self : Optional[Any],vocab_file : str=None,merges_file : str=None,unk_token : int="<|endoftext|>",bos_token : List[str]="<|endoftext|>",eos_token : Optional[Any]="<|endoftext|>",add_prefix_space : Union[str, Any]=False,trim_offsets : List[Any]=True,**kwargs : str,):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,merges=merges_file,add_prefix_space=add_prefix_space,trim_offsets=trim_offsets,),bos_token=bos_token,eos_token=eos_token,unk_token=unk_token,**kwargs,)
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens ( self : Any,token_ids_0 : Optional[Any],token_ids_1 : Optional[int]=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences ( self : List[Any],token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
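# Plain-Python sketch of the special-token layouts built above: a single
# sequence becomes `<bos> A <eos>`, a pair becomes `<bos> A <eos> <eos> B <eos>`,
# and the token type ids are all zeros. The ids here are illustrative only.
bos_id, eos_id = 0, 2
seq_a, seq_b = [10, 11], [20, 21, 22]
single = [bos_id] + seq_a + [eos_id]
pair = single + [eos_id] + seq_b + [eos_id]
print(single)           # [0, 10, 11, 2]
print(pair)             # [0, 10, 11, 2, 2, 20, 21, 22, 2]
print(len(pair) * [0])  # token type ids: all zeros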
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig ( PretrainedConfig ):
    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']
    def __init__( self : str,fpn_feature_size : int = 2_5_6,mask_feature_size : int = 2_5_6,no_object_weight : float = 0.1,use_auxiliary_loss : bool = False,backbone_config : Optional[Dict] = None,decoder_config : Optional[Dict] = None,init_std : float = 0.02,init_xavier_std : float = 1.0,dice_weight : float = 1.0,cross_entropy_weight : float = 1.0,mask_weight : float = 20.0,output_auxiliary_logits : Optional[bool] = None,**kwargs : List[str],):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_8_4,in_channels=3,patch_size=4,embed_dim=1_2_8,depths=[2, 2, 1_8, 2],num_heads=[4, 8, 1_6, 3_2],window_size=1_2,drop_path_rate=0.3,out_features=["stage1", "stage2", "stage3", "stage4"],)
        if isinstance(backbone_config,dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                f'Supported model types: {",".join(self.backbones_supported )}' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type" ) if isinstance(decoder_config,dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'Transformer Decoder {decoder_type} not supported, please use one of'
                    f' {",".join(self.decoders_supported )}' )
            if isinstance(decoder_config,dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
@classmethod
    def from_backbone_and_decoder_configs ( cls : Any,backbone_config : PretrainedConfig,decoder_config : PretrainedConfig,**kwargs : Optional[int] ):
        return cls(
            backbone_config=backbone_config,decoder_config=decoder_config,**kwargs,)
    def to_dict ( self : Any ):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
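# Hedged usage sketch for the config class above: compose a MaskFormer config
# from an explicit backbone and decoder config. The Swin hyperparameters here
# are illustrative, not a recommended setting.
backbone = SwinConfig(embed_dim=96,depths=[2, 2, 6, 2],num_heads=[3, 6, 12, 24],out_features=["stage1", "stage2", "stage3", "stage4"],)
decoder = DetrConfig()
maskformer_config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone_config=backbone,decoder_config=decoder )
print(maskformer_config.backbone_config.model_type, maskformer_config.decoder_config.model_type )  # swin detr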
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed ( seed : int ):
    """simple docstring"""
    random.seed(seed )
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
    # ^^ safe to call this function even if cuda is not available
class EMAModel :
    def __init__( self : List[str],parameters : Iterable[torch.nn.Parameter],decay : float = 0.9999,min_decay : float = 0.0,update_after_step : int = 0,use_ema_warmup : bool = False,inv_gamma : Union[float, int] = 1.0,power : Union[float, int] = 2 / 3,model_cls : Optional[Any] = None,model_config : Dict[str, Any] = None,**kwargs : Optional[Any],):
        if isinstance(parameters,torch.nn.Module ):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",deprecation_message,standard_warn=False,)
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True
        if kwargs.get("max_value",None ) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value","1.0.0",deprecation_message,standard_warn=False )
            decay = kwargs["max_value"]
        if kwargs.get("min_value",None ) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value","1.0.0",deprecation_message,standard_warn=False )
            min_decay = kwargs["min_value"]
        parameters = list(parameters )
        self.shadow_params = [p.clone().detach() for p in parameters]
        if kwargs.get("device",None ) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device","1.0.0",deprecation_message,standard_warn=False )
            self.to(device=kwargs["device"] )
        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config
@classmethod
    def from_pretrained ( cls : Union[str, Any],path : List[str],model_cls : Optional[int] ):
        _ , ema_kwargs = model_cls.load_config(path,return_unused_kwargs=True )
        model = model_cls.from_pretrained(path )
        ema_model = cls(model.parameters(),model_cls=model_cls,model_config=model.config )
        ema_model.load_state_dict(ema_kwargs )
        return ema_model
    def save_pretrained ( self : str,path : int ):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
        model = self.model_cls.from_config(self.model_config )
        state_dict = self.state_dict()
        state_dict.pop("shadow_params",None )
        model.register_to_config(**state_dict )
        self.copy_to(model.parameters() )
        model.save_pretrained(path )
    def get_decay ( self : Optional[int],optimization_step : int ):
        step = max(0,optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (1_0 + step)
        cur_decay_value = min(cur_decay_value,self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value,self.min_decay )
        return cur_decay_value
@torch.no_grad()
    def step ( self : Any,parameters : Iterable[torch.nn.Parameter] ):
        if isinstance(parameters,torch.nn.Module ):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",deprecation_message,standard_warn=False,)
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params,parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param,modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to ( self : Dict,parameters : Iterable[torch.nn.Parameter] ):
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params,parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to ( self : List[str],device : Dict=None,dtype : Any=None ):
        self.shadow_params = [
            p.to(device=device,dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store ( self : Tuple,parameters : Iterable[torch.nn.Parameter] ):
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore ( self : int,parameters : Iterable[torch.nn.Parameter] ):
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
        for c_param, param in zip(self.temp_stored_params,parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict ( self : Dict,state_dict : dict ):
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get("decay",self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1" )
        self.min_decay = state_dict.get("min_decay",self.min_decay )
        if not isinstance(self.min_decay,float ):
            raise ValueError("Invalid min_decay" )
        self.optimization_step = state_dict.get("optimization_step",self.optimization_step )
        if not isinstance(self.optimization_step,int ):
            raise ValueError("Invalid optimization_step" )
        self.update_after_step = state_dict.get("update_after_step",self.update_after_step )
        if not isinstance(self.update_after_step,int ):
            raise ValueError("Invalid update_after_step" )
        self.use_ema_warmup = state_dict.get("use_ema_warmup",self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup,bool ):
            raise ValueError("Invalid use_ema_warmup" )
        self.inv_gamma = state_dict.get("inv_gamma",self.inv_gamma )
        if not isinstance(self.inv_gamma,(float, int) ):
            raise ValueError("Invalid inv_gamma" )
        self.power = state_dict.get("power",self.power )
        if not isinstance(self.power,(float, int) ):
            raise ValueError("Invalid power" )
        shadow_params = state_dict.get("shadow_params",None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params,list ):
                raise ValueError("shadow_params must be a list" )
            if not all(isinstance(p,torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("shadow_params must all be Tensors" )
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions ( op : Union[str, Any] , got_ver : List[Any] , want_ver : Union[str, Any] , requirement : Dict , pkg : Optional[int] , hint : Optional[int] ):
    """simple docstring"""
    if got_ver is None or want_ver is None:
        raise ValueError(
            F'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
            F' reinstalling {pkg}.' )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            F'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' )
def require_version ( requirement : str , hint : Optional[str] = None ):
    """simple docstring"""
    hint = F'\n{hint}' if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$" , requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , requirement )
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                F' got {requirement}' )
        pkg , want_full = match[0]
        want_range = want_full.split("," ) # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)" , w )
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    F' but got {requirement}' )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F'{requirement}: need one of {list(ops.keys() )}, but got {op}' )
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F'The \'{requirement}\' distribution was not found and is required by this application. {hint}' )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core ( requirement : int ):
    """simple docstring"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement , hint )
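# Usage sketch relying on the `ops` table and functions defined above:
# packaging.version gives proper semantic comparison ("1.10" > "1.2", unlike a
# plain string comparison). The requirement strings are illustrative.
print(ops[">"](version.parse("1.10" ) , version.parse("1.2" )))  # True
# Typical calls (they raise if the installed version does not match):
# require_version("numpy>=1.20,<3", hint="pip install -U numpy")
# require_version_core("tokenizers==0.9.4")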
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def load_checkpoint ( checkpoint_path : Optional[int] ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key )
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key )
    keys = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj." , ".q_proj." )
            k_name = key.replace(".qkv_proj." , ".k_proj." )
            v_name = key.replace(".qkv_proj." , ".v_proj." )
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` keeps the fused QKV weight stored in K, V, Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k , v , q = torch.split(value , depth // 3 , dim=0 )
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def convert_opt_checkpoint ( checkpoint_path : List[Any] , pytorch_dump_folder_path : Dict , config : List[str]=None ):
    """simple docstring"""
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 1 |
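# Minimal demo of the fused-QKV split performed in `load_checkpoint` above: a
# (3*d, d) projection is cut into three equal row blocks with torch.split,
# arriving in K, V, Q order per the metaseq storage convention noted there.
d = 4
fused = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
print(k.shape, v.shape, q.shape)  # torch.Size([4, 4]) each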
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester :
    def __init__( self : Union[str, Any],parent : List[str],batch_size : Union[str, Any]=1_3,seq_length : str=7,act_dim : List[Any]=6,state_dim : Optional[Any]=1_7,hidden_size : Tuple=2_3,max_length : int=1_1,is_training : Optional[int]=True,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs ( self : Union[str, Any] ):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length),vocab_size=1_0_0_0 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config ( self : int ):
return DecisionTransformerConfig(
batch_size=self.batch_size,seq_length=self.seq_length,act_dim=self.act_dim,state_dim=self.state_dim,hidden_size=self.hidden_size,max_length=self.max_length,)
    def create_and_check_model ( self : int,config : List[str],states : Union[str, Any],actions : Dict,rewards : Any,returns_to_go : List[str],timesteps : Any,attention_mask : str,):
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states,actions,rewards,returns_to_go,timesteps,attention_mask )
        self.parent.assertEqual(result.state_preds.shape,states.shape )
        self.parent.assertEqual(result.action_preds.shape,actions.shape )
        self.parent.assertEqual(result.return_preds.shape,returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape,(self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common ( self : List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase_ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
    def setUp ( self : int ):
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self,config_class=DecisionTransformerConfig,hidden_size=3_7 )
    def test_config ( self : Tuple ):
self.config_tester.run_common_tests()
    def test_model ( self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained ( self : Optional[Any] ):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature ( self : Union[str, Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )],expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_autoregressive_prediction ( self : str ):
        NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 1_0 # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1,1,config.state_dim ).to(device=torch_device,dtype=torch.float32 ) # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]],device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN,device=torch_device,dtype=torch.float32 ).reshape(1,1,1 )
        states = state
        actions = torch.zeros(1,0,config.act_dim,device=torch_device,dtype=torch.float32 )
        rewards = torch.zeros(1,0,device=torch_device,dtype=torch.float32 )
        timesteps = torch.tensor(0,device=torch_device,dtype=torch.long ).reshape(1,1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1,1,config.act_dim,device=torch_device )],dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1,1,device=torch_device )],dim=1 )
            attention_mask = torch.ones(1,states.shape[1] ).to(dtype=torch.long,device=states.device )
            with torch.no_grad():
                state_pred , action_pred , return_pred = model(
                    states=states,actions=actions,rewards=rewards,returns_to_go=returns_to_go,timesteps=timesteps,attention_mask=attention_mask,return_dict=False,)
            self.assertEqual(action_pred.shape,actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1],expected_outputs[step],atol=1e-4 ) )
            state , reward , done , _ = ( # env.step(action)
                torch.randn(1,1,config.state_dim ).to(device=torch_device,dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state],dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1,1,1 )],dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1),device=torch_device,dtype=torch.long ) * (step + 1)],dim=1 )
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key ( name : Union[str, Any] ):
    """simple docstring"""
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        name = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        name = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        name = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        name = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        name = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        name = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        name = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out" , "text_projection.3" )
    return name
def convert_state_dict ( orig_state_dict : List[str] , config : str ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
            new_name = rename_key(key )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
                orig_state_dict[new_name] = val.squeeze_()
else:
                orig_state_dict[new_name] = val
return orig_state_dict
def prepare_img ( ):
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint ( checkpoint_path : Optional[int] , pytorch_dump_folder_path : Union[str, Any] , model_name : str="groupvit-gcc-yfcc" , push_to_hub : List[str]=False ):
    """simple docstring"""
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"] , images=image , padding=True , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
    else:
        raise ValueError(F'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("Successfully saved processor and model to" , pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(model_name , organization="nielsr" )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital ( n : int ):
    """simple docstring"""
    base_str = str(n )
    return len(base_str ) == 9 and set(base_str ) == set("123456789" )
def solution ( ):
    """simple docstring"""
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 |
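# Quick check of the predicate above: 192384576 is the classic 1-9 pandigital
# concatenated product (192 * 1, 192 * 2, 192 * 3 joined together).
print(is_9_pandigital(192384576))  # True
print(is_9_pandigital(123456780))  # False: contains 0 and is missing 9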
'''simple docstring'''
from __future__ import annotations
def comp_and_swap ( array : list[int] , index_a : int , index_b : int , direction : int ):
    """simple docstring"""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge ( array : list[int] , low : int , length : int , direction : int ):
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort ( array : list[int] , low : int , length : int , direction : int ):
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 1 |
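# Note: bitonic sort assumes the slice length is a power of two. A common
# workaround, sketched here, pads with +inf sentinels and trims afterwards:
def bitonic_sort_padded(values: list) -> list:
    n = 1
    while n < len(values):
        n *= 2
    padded = values + [float('inf')] * (n - len(values))
    bitonic_sort(padded, 0, n, 1)  # ascending; sentinels sink to the end
    return padded[: len(values)]

print('Padded bitonic sort', bitonic_sort_padded([5, 3, 9, 1, 7]))  # [1, 3, 5, 7, 9]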
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object
    def Body ( *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Optional[int] ):
        """simple docstring"""
        pass
    _serve_dependencies_installed = False
logger = logging.get_logger('transformers-cli/serving')
def serve_command_factory ( args : Namespace ):
    """simple docstring"""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    return ServeCommand(nlp , args.host , args.port , args.workers )
class ServeModelInfoResult ( BaseModel ):
    infos : dict
class ServeTokenizeResult ( BaseModel ):
    tokens : List[str]
    tokens_ids : Optional[List[int]]
class ServeDeTokenizeResult ( BaseModel ):
    text : str
class ServeForwardResult ( BaseModel ):
    output : Any
class ServeCommand ( BaseTransformersCLICommand ):
@staticmethod
def lowerCamelCase_ ( __A : ArgumentParser ):
_lowerCamelCase : int = parser.add_parser(
"serve",help="CLI tool to run inference requests through REST and GraphQL endpoints." )
serve_parser.add_argument(
"--task",type=__A,choices=get_supported_tasks(),help="The task to run the pipeline on",)
serve_parser.add_argument("--host",type=__A,default="localhost",help="Interface the server will listen on." )
serve_parser.add_argument("--port",type=__A,default=8_8_8_8,help="Port the serving will listen to." )
serve_parser.add_argument("--workers",type=__A,default=1,help="Number of http workers" )
serve_parser.add_argument("--model",type=__A,help="Model's name or path to stored model." )
serve_parser.add_argument("--config",type=__A,help="Model's config name or path to stored model." )
serve_parser.add_argument("--tokenizer",type=__A,help="Tokenizer name to use." )
serve_parser.add_argument(
"--device",type=__A,default=-1,help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",)
serve_parser.set_defaults(func=serve_command_factory )
def __init__( self,pipeline : Pipeline,host : str,port : int,workers : int ):
self._pipeline = pipeline
self.host = host
self.port = port
self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(f'Serving model over {host}:{port}' )
self._app = FastAPI(
routes=[
APIRoute(
"/",self.model_info,response_model=ServeModelInfoResult,response_class=JSONResponse,methods=["GET"],),
APIRoute(
"/tokenize",self.tokenize,response_model=ServeTokenizeResult,response_class=JSONResponse,methods=["POST"],),
APIRoute(
"/detokenize",self.detokenize,response_model=ServeDeTokenizeResult,response_class=JSONResponse,methods=["POST"],),
APIRoute(
"/forward",self.forward,response_model=ServeForwardResult,response_class=JSONResponse,methods=["POST"],),
],timeout=6_0_0,)
def run( self ):
run(self._app,host=self.host,port=self.port,workers=self.workers )
def model_info( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def tokenize( self,text_input : str = Body(None,embed=True ),return_ids : bool = Body(False,embed=True ) ):
try:
tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
if return_ids:
tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
return ServeTokenizeResult(tokens=tokens_txt,tokens_ids=tokens_ids )
else:
return ServeTokenizeResult(tokens=tokens_txt )
except Exception as e:
raise HTTPException(status_code=5_0_0,detail={"model": "", "error": str(e )} )
def detokenize( self,tokens_ids : List[int] = Body(None,embed=True ),skip_special_tokens : bool = Body(False,embed=True ),clean_up_tokenization_spaces : bool = Body(True,embed=True ),):
try:
decoded_str = self._pipeline.tokenizer.decode(tokens_ids,skip_special_tokens,clean_up_tokenization_spaces )
return ServeDeTokenizeResult(model="",text=decoded_str )
except Exception as e:
raise HTTPException(status_code=5_0_0,detail={"model": "", "error": str(e )} )
async def forward( self,inputs=Body(None,embed=True ) ):
# Check we don't have empty string
if len(inputs ) == 0:
return ServeForwardResult(output=[],attention=[] )
try:
# Forward through the model
output = self._pipeline(inputs )
return ServeForwardResult(output=output )
except Exception as e:
raise HTTPException(5_0_0,{"error": str(__A )} ) | 11 |
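A quick way to exercise the endpoints registered above once the command is running (e.g. via `transformers-cli serve --task feature-extraction --port 8888`). This client sketch is an illustration, not part of the row; the JSON field names mirror the Body(...) parameters of the restored handlers and should be treated as assumptions.

import requests

base = "http://localhost:8888"
# GET / returns the model config as a ServeModelInfoResult payload.
print(requests.get(f"{base}/").json())
# POST /tokenize mirrors tokenize(text_input=..., return_ids=...).
resp = requests.post(f"{base}/tokenize", json={"text_input": "Hello world", "return_ids": True})
print(resp.json())  # expected shape: {"tokens": [...], "tokens_ids": [...]}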
'''simple docstring'''
import math
def check_partition_perfect ( positive_integer : int ):
"""simple docstring"""
exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(exponent )
def solution ( max_proportion : float = 1 / 12345 ):
"""simple docstring"""
total_partitions = 0
perfect_partitions = 0
integer = 3
while True:
partition_candidate = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(partition_candidate ):
partition_candidate = int(partition_candidate )
total_partitions += 1
if check_partition_perfect(partition_candidate ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(partition_candidate )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
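The predicate check_partition_perfect has a closed form: log2(sqrt(4k + 1)/2 + 1/2) is an integer m exactly when sqrt(4k + 1) = 2**(m + 1) - 1, i.e. k = 2**m * (2**m - 1), giving k = 2, 12, 56, 240, ... A small verification sketch, assuming the functions above are in scope; note it relies on floating-point log2, as the original does.

closed_form = {2**m * (2**m - 1) for m in range(1, 7)}
by_predicate = {k for k in range(1, max(closed_form) + 1) if check_partition_perfect(k)}
assert by_predicate == closed_form  # {2, 12, 56, 240, 992, 4032}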
'''simple docstring'''
def solution ( n : int = 4000000 ):
"""simple docstring"""
even_fibs = []
a , b = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(b )
a , b = b, a + b
return sum(even_fibs )
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 |
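Every third Fibonacci number is even, and the even ones satisfy E(n) = 4*E(n-1) + E(n-2) (2, 8, 34, 144, ...). An alternative sketch that never materializes the odd terms; the identity is standard, the function name is ours.

def solution_even_only(n: int = 4000000) -> int:
    # Walk only the even Fibonacci numbers via E(n) = 4*E(n-1) + E(n-2).
    total, prev, cur = 0, 0, 2
    while cur <= n:
        total += cur
        prev, cur = cur, 4 * cur + prev
    return total

assert solution_even_only() == solution()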
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer ( Trainer ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast ( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
mask_token = (
AddedToken(mask_token,lstrip=True,rstrip=False,normalized=False )
if isinstance(mask_token,str )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def build_inputs_with_special_tokens ( self,token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences ( self,token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary ( self,save_directory : str,filename_prefix : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(save_directory ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
out_vocab_file = os.path.join(
save_directory,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file,out_vocab_file )
return (out_vocab_file,) | 11 |
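A usage sketch for the tokenizer above (illustrative; it downloads the pretrained files): the two sequence methods produce the [CLS] A [SEP] B [SEP] layout with segment ids 0 for the first segment and 1 for the second.

from transformers import AlbertTokenizerFast

tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
enc = tok("first sentence", "second one")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # [CLS] ... [SEP] ... [SEP]
print(enc["token_type_ids"])  # 0s through the first [SEP], then 1s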
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
class BlenderbotTokenizerFast ( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers,pre_tok_state.pop("type" ) )
pre_tok_state["add_prefix_space"] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
tokenizer_component = "post_processor"
tokenizer_component_instance = getattr(self.backend_tokenizer,tokenizer_component,None )
if tokenizer_component_instance:
state = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
state["sep"] = tuple(state["sep"] )
if "cls" in state:
state["cls"] = tuple(state["cls"] )
changes_to_apply = False
if state.get("add_prefix_space",add_prefix_space ) != add_prefix_space:
state["add_prefix_space"] = add_prefix_space
changes_to_apply = True
if state.get("trim_offsets",trim_offsets ) != trim_offsets:
state["trim_offsets"] = trim_offsets
changes_to_apply = True
if changes_to_apply:
component_class = getattr(processors,state.pop("type" ) )
new_value = component_class(**state )
setattr(self.backend_tokenizer,tokenizer_component,new_value )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def mask_token( self ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self,value ):
value = AddedToken(value,lstrip=True,rstrip=False ) if isinstance(value,str ) else value
self._mask_token = value
def _batch_encode_plus( self,*args,**kwargs ):
is_split_into_words = kwargs.get("is_split_into_words",False )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*args,**kwargs )
def _encode_plus( self,*args,**kwargs ):
is_split_into_words = kwargs.get("is_split_into_words",False )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*args,**kwargs )
def save_vocabulary( self,save_directory : str,filename_prefix : Optional[str] = None ):
files = self._tokenizer.model.save(save_directory,name=filename_prefix )
return tuple(files )
def create_token_type_ids_from_sequences( self,token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def build_inputs_with_special_tokens( self,token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
return token_ids_0 + [self.eos_token_id]
def _build_conversation_input_ids( self,conversation : "Conversation" ):
inputs = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(text )
full_string = " ".join(inputs )
input_ids = self.encode(full_string )
if len(input_ids ) > self.model_max_length:
input_ids = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 1 |
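Unlike BERT-style tokenizers, build_inputs_with_special_tokens here appends only the end token, and a conversation is flattened into one space-joined string in which user turns get a leading space. A standalone sketch of that flattening, mirroring _build_conversation_input_ids above:

def flatten_conversation(turns: list[tuple[bool, str]]) -> str:
    # turns: (is_user, text) pairs; user turns are space-prefixed, as in
    # _build_conversation_input_ids, before everything is joined.
    parts = [" " + text if is_user else text for is_user, text in turns]
    return " ".join(parts)

print(flatten_conversation([(True, "Hello!"), (False, "Hi, how are you?")]))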
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto ( a , b ):
"""simple docstring"""
name_a = a.name
name_b = b.name
a.name = ""
b.name = ""
res = a == b
a.name = name_a
b.name = name_b
return res
def _node_replace_input_with ( node_proto , name , new_name ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(i , new_name )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
_graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with ( graph_proto , name , new_name ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model ( model , model_without_ext , ind_to_replace ):
"""simple docstring"""
inits_with_data = list(model.graph.initializer )
inits = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
name_i = inits[i].name
name_ref = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers ( onnx_file_path ):
"""simple docstring"""
model_file_folder = os.path.dirname(onnx_file_path )
model_file_name = os.path.basename(onnx_file_path )
model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
inits = list(model.graph.initializer )
dup_set = set()
dup_map = {}
ind_to_replace = []
total_reduced_size = 0
for i in range(len(inits ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(inits ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(i )
dup_set.add(j )
dtype = inits[j].data_type
mem_size = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("unexpected data type: " , dtype )
total_reduced_size += mem_size
name_i = inits[i].name
name_j = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(name_j )
else:
dup_map[name_i] = [name_j]
ind_to_replace.append((j, i) )
print("total reduced size: " , total_reduced_size / 1024 / 1024 / 1024 , "GB" )
ind_to_replace = sorted(ind_to_replace )
_remove_dup_initializers_from_model(model , model , ind_to_replace )
optimized_model_file_name = "optimized_" + model_file_name
new_model = os.path.join(model_file_folder , optimized_model_file_name )
onnx.save(model , new_model )
return new_model | 11 |
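The deduplication works because protobuf messages compare field by field: once both names are blanked, two initializers with identical values and dims compare equal. A tiny demo (illustrative; the tensor names are made up):

from onnx import helper, TensorProto

t1 = helper.make_tensor("w1", TensorProto.FLOAT, [2], [1.0, 2.0])
t2 = helper.make_tensor("w2", TensorProto.FLOAT, [2], [1.0, 2.0])
print(t1 == t2)                        # False: the name fields differ
print(_is_equal_tensor_proto(t1, t2))  # True: values match once names are blanked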
'''simple docstring'''
def equation ( x : float ):
"""simple docstring"""
return 10 - x * x
def bisection ( a : float , b : float ):
"""simple docstring"""
if equation(a ) * equation(b ) >= 0:
raise ValueError("Wrong space!" )
c = a
while (b - a) >= 0.0_1:
# Find middle point
c = (a + b) / 2
# Check if middle point is root
if equation(c ) == 0.0:
break
# Decide the side to repeat the steps
if equation(c ) * equation(a ) < 0:
b = c
else:
a = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
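Each pass halves the bracket [a, b], so the loop runs about ceil(log2((b - a)/0.01)) times; for bisection(0, 6) that is ten halvings. A one-line check of the bound (our helper, not part of the row):

import math

def bisection_steps(a: float, b: float, tol: float = 0.01) -> int:
    # Number of halvings until the bracket width drops below tol.
    return math.ceil(math.log2((b - a) / tol))

print(bisection_steps(0, 6))  # 10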
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
def __init__( self,text : str,pattern : str ):
self.text , self.pattern = text, pattern
self.textLen , self.patLen = len(text ), len(pattern )
def match_in_pattern( self,char : str ):
for i in range(self.patLen - 1,-1,-1 ):
if char == self.pattern[i]:
return i
return -1
def mismatch_in_text( self,current_pos : int ):
for i in range(self.patLen - 1,-1,-1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def bad_character_heuristic( self ):
# searches pattern in text and returns index positions
positions = []
for i in range(self.textLen - self.patLen + 1 ):
mismatch_index = self.mismatch_in_text(i )
if mismatch_index == -1:
positions.append(i )
else:
match_index = self.match_in_pattern(self.text[mismatch_index] )
i = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions) | 11 |
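Note that match_in_pattern rescans the pattern on every mismatch, so this variant is O(n*m) in the worst case; the textbook bad-character rule instead precomputes a last-occurrence table once. A sketch of that alternative (not the row's implementation):

def last_occurrence_table(pattern: str) -> dict[str, int]:
    # Later indices overwrite earlier ones, leaving the last occurrence.
    return {char: idx for idx, char in enumerate(pattern)}

table = last_occurrence_table("AB")
print(table.get("A", -1), table.get("Z", -1))  # 0 -1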
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'\s+')
def get_hash ( example ):
"""simple docstring"""
return {"hash": hashlib.md5(re.sub(PATTERN , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def line_stats ( example ):
"""simple docstring"""
line_lengths = [len(line ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats ( example ):
"""simple docstring"""
alpha_frac = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def check_uniques ( example , uniques ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def is_autogenerated ( example , scan_width=5 ):
"""simple docstring"""
keywords = ["auto-generated", "autogenerated", "automatically generated"]
lines = example["content"].splitlines()
for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test ( example , scan_width=5 , coeff=0.0_5 ):
"""simple docstring"""
keywords = ["unit tests", "test file", "configuration file"]
lines = example["content"].splitlines()
count_config = 0
count_test = 0
# first test
for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
nlines = example["content"].count("\n" )
threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords ( example ):
"""simple docstring"""
keywords = ["def ", "class ", "for ", "while "]
lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments ( example , minimum=4 ):
"""simple docstring"""
lines = example["content"].splitlines()
counter = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio ( example ):
"""simple docstring"""
input_ids = tokenizer(example["content"] , truncation=False )["input_ids"]
ratio = len(example["content"] ) / len(input_ids )
return {"ratio": ratio}
def preprocess ( example ):
"""simple docstring"""
results = {}
results.update(get_hash(example ) )
results.update(line_stats(example ) )
results.update(alpha_stats(example ) )
results.update(char_token_ratio(example ) )
results.update(is_autogenerated(example ) )
results.update(is_config_or_test(example ) )
results.update(has_no_keywords(example ) )
results.update(has_few_assignments(example ) )
return results
def filter ( example , uniques , args ):
"""simple docstring"""
if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file ( file_path ):
"""simple docstring"""
with open(file_path , "rb" ) as f_in:
with gzip.open(str(file_path ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(f_in , f_out )
os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
t_start = time.time()
ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
file_path = str(data_dir / f'''file-{file_number+1:012}.json''')
end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 1 |
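The exact-dedup stage boils down to: hash whitespace-stripped content with md5, then keep a row only on the first sighting of its hash (check_uniques removes the hash from the set, so later duplicates return False). In miniature:

import hashlib
import re

def content_hash(text: str) -> str:
    return hashlib.md5(re.sub(r"\s+", "", text).encode("utf-8")).hexdigest()

seen = set()
for doc in ["a b", "a  b", "c"]:  # the first two normalize identically
    h = content_hash(doc)
    print(repr(doc), "kept" if h not in seen else "dropped")
    seen.add(h)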
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys ( flax_key_tuple , flax_tensor ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
# linear layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict ( layer , checkpoint_info , switch_checkpoint_path ):
"""simple docstring"""
if "metadata" in layer:
split_layer = layer.split("metadata" )
curr_real_layer_name = "".join(split_layer[0] )[:-1]
split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
split_layer = layer.split("kvstore" )
curr_real_layer_name = "".join(split_layer[0] )[:-1]
split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
split_layer = layer.split("/" )
curr_real_layer_name = "/".join(split_layer[:-1] )
split_layer = (split_layer[-1],)
if "kvstore/path" in layer:
content = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
content = "file"
else:
content = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def rename_and_save_block ( current_block , save_path ):
"""simple docstring"""
current_block = rename_keys(current_block )
new_current_block = {}
for k, v in current_block.items():
new_current_block[k.replace("/" , "." )] = v
current_block = new_current_block
torch.save(current_block , save_path )
def shard_on_the_fly ( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name : str = WEIGHTS_NAME ):
"""simple docstring"""
max_shard_size = convert_file_size_to_int(max_shard_size )
sharded_state_dicts = []
current_block = {}
current_block_size = 0
total_size = 0
os.makedirs(dump_path , exist_ok=True )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
checkpoint_info = flatten_dict(checkpoint_info , sep="/" )
all_layers = {}
for layer in checkpoint_info.keys():
curr_real_layer_name , split_layer , content = get_key_and_tensorstore_dict(
layer , checkpoint_info , switch_checkpoint_path )
if curr_real_layer_name in all_layers:
all_layers[curr_real_layer_name][split_layer[-1]] = content
else:
all_layers[curr_real_layer_name] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
raw_weights = torch.tensor(raw_weights )
weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
key , raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ) , raw_weights )
key = "/".join(key )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
save_path = os.path.join(
dump_path , weights_name.replace(".bin" , F'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
rename_and_save_block(current_block , save_path )
sharded_state_dicts.append(current_block.keys() )
del current_block
current_block = {}
current_block_size = 0
current_block[key] = raw_weights.to(getattr(torch , dtype ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
save_path = os.path.join(dump_path , weights_name.replace(".bin" , F'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
rename_and_save_block(current_block , save_path )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(sharded_state_dicts ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts ):
shard_file = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' ) # len(sharded_state_dicts):05d}
temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
shards[shard_file] = shard
for key in shard:
weight_map[key] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
f.write(content )
return metadata, index
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
args = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check ():
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
model = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
tokenizer = T5Tokenizer.from_pretrained("t5-small" )
text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
input_ids = tokenizer(text , return_tensors="pt" ).input_ids
out = model.generate(input_ids , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 11 |
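The sharding loop's size accounting is simply numel * bytes-per-dtype accumulated until the byte budget is hit, at which point a new shard starts. A reduced sketch; the budget below is a rough stand-in for convert_file_size_to_int("10GB"), which may use decimal GB.

import torch

def tensor_bytes(t: torch.Tensor) -> int:
    return t.numel() * t.element_size()

budget = 10 * 1024**3
shards, current = 1, 0
for t in [torch.zeros(4096, 4096), torch.zeros(8192, 8192)]:
    size = tensor_bytes(t)
    if current + size > budget:  # this weight would tip the shard over
        shards, current = shards + 1, 0
    current += size
print(shards, current)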
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
model_type: str = field(
default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
data_dir: str = field(
default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
max_seq_length: int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
doc_stride: int = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
max_query_length: int = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
max_answer_length: int = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
overwrite_cache: bool = field(
default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
version_2_with_negative: bool = field(
default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
null_score_diff_threshold: float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
n_best_size: int = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lang_id: int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split ( Enum ):
train = 'train'
dev = 'dev'
class SquadDataset ( Dataset ):
args: SquadDataTrainingArguments
features: List[SquadFeatures]
mode: Split
is_language_sensitive: bool
def __init__( self,args : SquadDataTrainingArguments,tokenizer : PreTrainedTokenizer,limit_length : Optional[int] = None,mode : Union[str, Split] = Split.train,is_language_sensitive : Optional[bool] = False,cache_dir : Optional[str] = None,dataset_format : Optional[str] = "pt",):
self.args = args
self.is_language_sensitive = is_language_sensitive
self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(mode,str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
self.mode = mode
# Load data features from cache or dataset file
version_tag = "v2" if args.version_2_with_negative else "v1"
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.old_features = torch.load(cached_features_file )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
self.features = self.old_features["features"]
self.dataset = self.old_features.get("dataset",None )
self.examples = self.old_features.get("examples",None )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
self.examples = self.processor.get_dev_examples(args.data_dir )
else:
self.examples = self.processor.get_train_examples(args.data_dir )
self.features , self.dataset = squad_convert_examples_to_features(
examples=self.examples,tokenizer=tokenizer,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=dataset_format,)
start = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},cached_features_file,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self,i ):
# Convert to Tensors and build dataset
feature = self.features[i]
input_ids = torch.tensor(feature.input_ids,dtype=torch.long )
attention_mask = torch.tensor(feature.attention_mask,dtype=torch.long )
token_type_ids = torch.tensor(feature.token_type_ids,dtype=torch.long )
cls_index = torch.tensor(feature.cls_index,dtype=torch.long )
p_mask = torch.tensor(feature.p_mask,dtype=torch.float )
is_impossible = torch.tensor(feature.is_impossible,dtype=torch.float )
inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
start_positions = torch.tensor(feature.start_position,dtype=torch.long )
end_positions = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
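The FileLock around the cache path is what makes the dataset build safe under distributed training: exactly one process builds and saves the features, while the rest block on the lock and then read the finished file. The core pattern, isolated (our helper names):

import os

import torch
from filelock import FileLock

def load_or_build(cache_file: str, build_fn):
    with FileLock(cache_file + ".lock"):  # one builder, everyone else waits
        if os.path.exists(cache_file):
            return torch.load(cache_file)
        data = build_fn()
        torch.save(data, cache_file)
        return data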
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A_ ( _lowerCAmelCase : str = "laptop" ):
"""simple docstring"""
_lowerCamelCase : Dict = F'https://www.amazon.in/laptop/s?k={product}'
_lowerCamelCase : Tuple = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
_lowerCamelCase : int = BeautifulSoup(requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).text )
# Initialize a Pandas dataframe with the column titles
_lowerCamelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
product_title = item.h2.text
product_link = "https://www.amazon.in/" + item.h2.a["href"]
product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
product_rating = "Not available"
try:
product_mrp = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
product_mrp = ""
try:
discount = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
discount = float("nan" )
except AttributeError:
pass
data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
_lowerCamelCase : List[str] = " "
_lowerCamelCase : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
UpperCAmelCase_ : str = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''') | 11 |
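The nested float(...strip("₹").replace(",", "")) expression implements discount = (MRP - price)/MRP * 100 on rupee-formatted strings. Isolated for clarity (our helper, same arithmetic):

def discount_percent(mrp: str, price: str) -> float:
    to_num = lambda s: float(s.strip("₹").replace(",", ""))
    return (to_num(mrp) - to_num(price)) / to_num(mrp) * 100

print(round(discount_percent("₹1,000", "₹750"), 1))  # 25.0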
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig ( PretrainedConfig ):
model_type = 'yolos'
def __init__( self,hidden_size=7_6_8,num_hidden_layers=1_2,num_attention_heads=1_2,intermediate_size=3_0_7_2,hidden_act="gelu",hidden_dropout_prob=0.0,attention_probs_dropout_prob=0.0,initializer_range=0.02,layer_norm_eps=1e-12,image_size=[5_1_2, 8_6_4],patch_size=1_6,num_channels=3,qkv_bias=True,num_detection_tokens=1_0_0,use_mid_position_embeddings=True,auxiliary_loss=False,class_cost=1,bbox_cost=5,giou_cost=2,bbox_loss_coefficient=5,giou_loss_coefficient=2,eos_coefficient=0.1,**kwargs,):
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.num_detection_tokens = num_detection_tokens
self.use_mid_position_embeddings = use_mid_position_embeddings
self.auxiliary_loss = auxiliary_loss
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
class YolosOnnxConfig ( OnnxConfig ):
torch_onnx_minimum_version = version.parse('1.11' )
@property
def inputs( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def atol_for_validation( self ):
return 1e-4
@property
def default_onnx_opset( self ):
return 1_2 | 11 |
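A usage sketch for the config above (illustrative): any keyword overrides the matching default, and everything else keeps the values from __init__.

from transformers import YolosConfig

config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
print(config.model_type, config.hidden_size, config.num_detection_tokens)
# yolos 768 100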
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class GPTSw3Tokenizer ( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
name_or_path = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt: off
self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
self.non_printing_characters_re = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self ):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__( self,d ):
self.__dict__ = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def vocab_size( self ):
return len(self.sp_model )
def preprocess_text( self,text : str ):
text = self.non_printing_characters_re.sub("",text )
# Normalize whitespaces
text = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
text = unicodedata.normalize("NFC",text )
return text
def _tokenize( self,text : str,**kwargs ):
text = self.preprocess_text(text )
return self.sp_model.encode(text,out_type=str )
def _convert_token_to_id( self,token : str ):
return self.sp_model.PieceToId(token )
def _convert_id_to_token( self,index : int ):
return self.sp_model.IdToPiece(index )
@staticmethod
def clean_up_tokenization( out_string : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
        return self.encode(text=__A )
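# A de-obfuscated sketch of the preprocessing pipeline above (assumptions: the
# whitespace set here is an illustrative subset; the real tokenizer keeps its
# own list of Unicode space characters):
import re
import unicodedata

_NON_PRINTING = re.compile(
    "[%s]" % "".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))
)
_WHITESPACES = {"\u00a0", "\u2009", "\u200a", "\u202f", "\u3000"}  # illustrative subset

def preprocess_text_sketch(text: str) -> str:
    text = _NON_PRINTING.sub("", text)  # strip non-printing control characters
    text = "".join(" " if ch in _WHITESPACES else ch for ch in text)  # normalize exotic whitespace
    return unicodedata.normalize("NFC", text)  # NFC Unicode normalization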
'''simple docstring'''
import math
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A_ ( _lowerCAmelCase : float = 0.1 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 3
_lowerCamelCase : int = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_lowerCAmelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
    doctest.testmod()
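# Sanity check for the 6k +/- 1 primality test above (a sketch; the renaming
# collapsed both helpers to `A_`, but the loop invokes the first as `is_prime`):
# >>> [n for n in range(2, 30) if is_prime(n)]
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]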
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ : str = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase_ : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Iterable[int] ):
_lowerCamelCase : Node | None = None
for i in sorted(__A,reverse=__A ):
_lowerCamelCase : Dict = Node(__A,self.head )
def __iter__( self : str ):
_lowerCamelCase : Dict = self.head
while node:
yield node.data
_lowerCamelCase : Optional[Any] = node.next_node
def __len__( self : str ):
return sum(1 for _ in self )
def __str__( self : str ):
return " -> ".join([str(__A ) for node in self] )
def A_ ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ):
"""simple docstring"""
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : List[str] = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
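# The merge above simply concatenates both lists and re-sorts on construction.
# A minimal standalone equivalent (a sketch, no linked list) using heapq.merge
# on two already-sorted sequences:
import heapq

def merge_sorted(a: list, b: list) -> list:
    # both inputs must already be sorted in ascending order
    return list(heapq.merge(a, b))

assert merge_sorted([1, 3, 5], [2, 4, 6]) == [1, 2, 3, 4, 5, 6]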
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : Any = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ['DeiTFeatureExtractor']
UpperCAmelCase_ : Any = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Union[str, Any] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    UpperCAmelCase_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
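# The structure above defers heavy imports until first attribute access. A
# minimal standalone sketch of the same idea (hypothetical helper, independent
# of transformers' _LazyModule; relies on PEP 562 module __getattr__):
import importlib
import types

def make_lazy(name: str, import_structure: dict) -> types.ModuleType:
    module = types.ModuleType(name)

    def __getattr__(attr):
        for submodule, exported in import_structure.items():
            if attr in exported:
                real = importlib.import_module(submodule)  # imported only now
                return getattr(real, attr)
        raise AttributeError(f"module {name!r} has no attribute {attr!r}")

    module.__getattr__ = __getattr__
    return module

lazy_math = make_lazy("lazy_math", {"math": ["sqrt", "pi"]})
assert lazy_math.sqrt(9) == 3.0  # 'math' is resolved on first access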
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
    UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
while a != 0:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = b % a, a
return b
def A_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if gcd(_lowerCAmelCase , _lowerCAmelCase ) != 1:
_lowerCamelCase : List[Any] = F'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = 1, 0, a
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = 0, 1, m
while va != 0:
_lowerCamelCase : str = ua // va
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
    return ua % m
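# The tuple update above collapses the (u1, u2, u3) / (v1, v2, v3) triples of
# the extended Euclidean algorithm into single names. A de-obfuscated sketch
# (assuming that reconstruction; requires gcd(a, m) == 1, as checked above):
def mod_inverse_sketch(a: int, m: int) -> int:
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        # rotate: old v becomes new u, new v is the remainder combination
        u1, u2, u3, v1, v2, v3 = v1, v2, v3, u1 - q * v1, u2 - q * v2, u3 - q * v3
    return u1 % m

assert (mod_inverse_sketch(3, 11) * 3) % 11 == 1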
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Union[str, Any],__A : Optional[int]=1_3,__A : Optional[Any]=7,__A : int=True,__A : Any=True,__A : Dict=True,__A : List[Any]=True,__A : Optional[Any]=9_9,__A : List[str]=3_2,__A : int=5,__A : str=4,__A : int=3_7,__A : Optional[Any]="gelu",__A : Union[str, Any]=0.1,__A : str=0.1,__A : Any=5_1_2,__A : Any=1_6,__A : Optional[int]=2,__A : int=0.02,__A : Optional[int]=4,):
_lowerCamelCase : Union[str, Any] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Optional[int] = is_training
_lowerCamelCase : str = use_attention_mask
_lowerCamelCase : str = use_token_type_ids
_lowerCamelCase : List[str] = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Any = None
if self.use_token_type_ids:
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = AlbertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = FlaxAlbertModelTester(self )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
_lowerCamelCase : str = model_class_name.from_pretrained("albert-base-v2" )
_lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(__A )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_lowerCamelCase : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A )[0]
_lowerCamelCase : Dict = (1, 1_1, 7_6_8)
self.assertEqual(output.shape,__A )
_lowerCamelCase : Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4],__A,atol=1e-4 ) )
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("multiplicative_persistence() only accepts integral values" )
if num < 0:
raise ValueError("multiplicative_persistence() does not accept negative values" )
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = str(_lowerCAmelCase )
while len(_lowerCAmelCase ) != 1:
_lowerCamelCase : Tuple = [int(_lowerCAmelCase ) for i in num_string]
_lowerCamelCase : Optional[Any] = 1
for i in range(0 , len(_lowerCAmelCase ) ):
total *= numbers[i]
_lowerCamelCase : Optional[Any] = str(_lowerCAmelCase )
steps += 1
return steps
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("additive_persistence() only accepts integral values" )
if num < 0:
raise ValueError("additive_persistence() does not accept negative values" )
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : List[Any] = str(_lowerCAmelCase )
while len(_lowerCAmelCase ) != 1:
_lowerCamelCase : Any = [int(_lowerCAmelCase ) for i in num_string]
_lowerCamelCase : List[str] = 0
for i in range(0 , len(_lowerCAmelCase ) ):
total += numbers[i]
_lowerCamelCase : List[str] = str(_lowerCAmelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
    doctest.testmod()
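# Worked example (a sketch; assuming the helpers keep their descriptive names,
# as the error messages suggest). Multiplicatively, 39 -> 3*9=27 -> 2*7=14 ->
# 1*4=4 takes three steps; additively, 39 -> 12 -> 3 takes two:
# >>> multiplicative_persistence(39)
# 3
# >>> additive_persistence(39)
# 2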
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : str = WEIGHTS_NAME ):
"""simple docstring"""
_lowerCamelCase : Dict = convert_file_size_to_int(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Dict = {}
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[str] = 0
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_lowerCamelCase : Tuple = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_lowerCamelCase : Union[str, Any] = flatten_dict(_lowerCAmelCase , sep="/" )
_lowerCamelCase : Optional[int] = {}
for layer in checkpoint_info.keys():
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = get_key_and_tensorstore_dict(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if curr_real_layer_name in all_layers:
_lowerCamelCase : Optional[int] = content
else:
_lowerCamelCase : int = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_lowerCamelCase : Any = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_lowerCamelCase : int = torch.tensor(_lowerCAmelCase )
_lowerCamelCase : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_lowerCamelCase , _lowerCamelCase : Optional[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , _lowerCAmelCase )
_lowerCamelCase : Optional[Any] = "/".join(_lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_lowerCamelCase : Tuple = os.path.join(
_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : Dict = 0
_lowerCamelCase : Optional[int] = raw_weights.to(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{len(_lowerCAmelCase )+1:05d}-of-???.bin' ) )
rename_and_save_block(_lowerCAmelCase , _lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(_lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : str = {}
for idx, shard in enumerate(_lowerCAmelCase ):
_lowerCamelCase : Dict = weights_name.replace(
".bin" , F'-{idx+1:05d}-of-{len(_lowerCAmelCase ):05d}.bin' ) # len(sharded_state_dicts):05d}
_lowerCamelCase : Union[str, Any] = os.path.join(_lowerCAmelCase , weights_name.replace(".bin" , F'-{idx+1:05d}-of-???.bin' ) )
os.rename(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
_lowerCamelCase : Tuple = shard
for key in shard:
_lowerCamelCase : str = shard_file
# Add the metadata
_lowerCamelCase : Optional[Any] = {"total_size": total_size}
_lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , "w" , encoding="utf-8" ) as f:
_lowerCamelCase : int = json.dumps(_lowerCAmelCase , indent=2 , sort_keys=_lowerCAmelCase ) + "\n"
f.write(_lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
UpperCAmelCase_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A_ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_lowerCamelCase : Optional[int] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_lowerCamelCase : int = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_lowerCamelCase : List[str] = TaTokenizer.from_pretrained("t5-small" )
_lowerCamelCase : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_lowerCamelCase : Optional[int] = tokenizer(_lowerCAmelCase , return_tensors="pt" ).input_ids
_lowerCamelCase : List[Any] = model.generate(_lowerCAmelCase , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
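# The sharding loop above splits weights greedily once a shard would exceed
# max_shard_size. A minimal standalone sketch of that accounting (hypothetical
# helper; sizes are in bytes and purely illustrative):
def greedy_shards(sizes: dict, max_shard_bytes: int) -> list:
    shards, current, current_bytes = [], {}, 0
    for name, nbytes in sizes.items():
        if current and current_bytes + nbytes > max_shard_bytes:
            shards.append(current)  # close the full shard
            current, current_bytes = {}, 0
        current[name] = nbytes
        current_bytes += nbytes
    if current:
        shards.append(current)  # last (possibly partial) shard
    return shards

assert len(greedy_shards({"a": 6, "b": 6, "c": 6}, 10)) == 3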
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
'''simple docstring'''
from math import sqrt
def A_ ( _lowerCAmelCase : int = 1000000 ):
"""simple docstring"""
_lowerCamelCase : int = 0
_lowerCamelCase : int = 0
_lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_lowerCAmelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
    print(f'''{solution() = }''')
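# Worked check for the counting above (a sketch): for a cuboid a x b x c with
# c the longest side, the shortest surface path is sqrt((a + b)**2 + c**2), so
# the loop tests each sum_shortest_sides = a + b against every max side length.
from math import sqrt

# The classic 6 x 5 x 3 cuboid has an integer shortest path of 10:
assert sqrt((5 + 3) ** 2 + 6 ** 2) == 10.0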
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=_lowerCAmelCase , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=_lowerCAmelCase , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=_lowerCAmelCase , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=_lowerCAmelCase , default=0 , help="cuda_id." , )
_lowerCamelCase : Union[str, Any] = parser.parse_args()
return args
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not len(_lowerCAmelCase ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
_lowerCamelCase , _lowerCamelCase : Tuple = imgs[0].size
_lowerCamelCase : List[str] = Image.new("RGB" , size=(cols * w, rows * h) )
_lowerCamelCase , _lowerCamelCase : List[Any] = grid.size
for i, img in enumerate(_lowerCAmelCase ):
grid.paste(_lowerCAmelCase , box=(i % cols * w, i // cols * h) )
return grid
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any]="robotic cat with wings" , _lowerCAmelCase : int=7.5 , _lowerCAmelCase : Optional[Any]=50 , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Dict=42 , ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = torch.Generator(pipeline.device ).manual_seed(_lowerCAmelCase )
_lowerCamelCase : List[Any] = pipeline(
_lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , ).images
_lowerCamelCase : List[str] = int(math.sqrt(_lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = image_grid(_lowerCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
UpperCAmelCase_ : Any = parse_args()
# Load models and create wrapper for stable diffusion
UpperCAmelCase_ : Dict = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
UpperCAmelCase_ : List[str] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
UpperCAmelCase_ : Optional[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
UpperCAmelCase_ : Tuple = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
UpperCAmelCase_ : Optional[int] = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
UpperCAmelCase_ : List[str] = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
UpperCAmelCase_ : str = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
UpperCAmelCase_ : Optional[int] = unet.to(torch.device('cuda', args.cuda_id))
UpperCAmelCase_ : Any = pipeline.to(unet.device)
UpperCAmelCase_, UpperCAmelCase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
UpperCAmelCase_ : Union[str, Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
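# Self-contained sketch of the grid-pasting logic used above (Pillow only;
# tile colors and sizes are illustrative):
from PIL import Image

_tiles = [Image.new('RGB', (64, 64), c) for c in ('red', 'green', 'blue', 'white')]
_w, _h = _tiles[0].size
_demo_grid = Image.new('RGB', (2 * _w, 2 * _h))
for _i, _tile in enumerate(_tiles):
    _demo_grid.paste(_tile, box=(_i % 2 * _w, _i // 2 * _h))
# _demo_grid is now a 128 x 128 composite of the four tiles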
'''simple docstring'''
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
_lowerCamelCase : Tuple = False
if num < 0:
_lowerCamelCase : List[Any] = True
_lowerCamelCase : int = -num
_lowerCamelCase : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
    doctest.testmod()
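# Sanity check (a sketch, assuming the converter above is exposed under a
# descriptive name such as `decimal_to_binary`; the renamed source calls it
# `A_`). It should agree with Python's built-in bin():
# >>> decimal_to_binary(-37)
# '-0b100101'
# >>> bin(-37)
# '-0b100101'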
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
UpperCAmelCase_ : str = 'hf-internal-testing/tiny-random-bert'
UpperCAmelCase_ : List[Any] = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
UpperCAmelCase_ : str = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Tuple = cached_file(__A,__A )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(__A ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(__A,__A ) ) )
with open(os.path.join(__A,"refs","main" ) ) as f:
_lowerCamelCase : Tuple = f.read()
self.assertEqual(__A,os.path.join(__A,"snapshots",__A,__A ) )
self.assertTrue(os.path.isfile(__A ) )
# File is cached at the same place the second time.
_lowerCamelCase : Tuple = cached_file(__A,__A )
self.assertEqual(__A,__A )
# Using a specific revision to test the full commit hash.
_lowerCamelCase : List[str] = cached_file(__A,__A,revision="9b8c223" )
self.assertEqual(__A,os.path.join(__A,"snapshots",__A,__A ) )
def lowerCamelCase_ ( self : List[str] ):
with self.assertRaisesRegex(__A,"is not a valid model identifier" ):
_lowerCamelCase : Optional[int] = cached_file("tiny-random-bert",__A )
with self.assertRaisesRegex(__A,"is not a valid git identifier" ):
_lowerCamelCase : Union[str, Any] = cached_file(__A,__A,revision="aaaa" )
with self.assertRaisesRegex(__A,"does not appear to have a file named" ):
_lowerCamelCase : Tuple = cached_file(__A,"conf" )
def lowerCamelCase_ ( self : Dict ):
with self.assertRaisesRegex(__A,"does not appear to have a file named" ):
_lowerCamelCase : List[Any] = cached_file(__A,"conf" )
with open(os.path.join(__A,"refs","main" ) ) as f:
_lowerCamelCase : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(__A,".no_exist",__A,"conf" ) ) )
_lowerCamelCase : Dict = cached_file(__A,"conf",_raise_exceptions_for_missing_entries=__A )
self.assertIsNone(__A )
_lowerCamelCase : List[str] = cached_file(__A,"conf",local_files_only=__A,_raise_exceptions_for_missing_entries=__A )
self.assertIsNone(__A )
_lowerCamelCase : Optional[Any] = mock.Mock()
_lowerCamelCase : List[Any] = 5_0_0
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : List[Any] = HTTPError
_lowerCamelCase : Union[str, Any] = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request",return_value=__A ) as mock_head:
_lowerCamelCase : int = cached_file(__A,"conf",_raise_exceptions_for_connection_errors=__A )
self.assertIsNone(__A )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self : Tuple ):
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only",__A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only",__A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only",__A ) )
def lowerCamelCase_ ( self : Tuple ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("bert-base-cased","ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(__A,"is not a valid model identifier" ):
get_file_from_repo("bert-base-case",__A )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(__A,"is not a valid git identifier" ):
get_file_from_repo("bert-base-cased",__A,revision="ahaha" )
_lowerCamelCase : Tuple = get_file_from_repo("bert-base-cased",__A )
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowerCamelCase : Any = json.loads(open(__A,"r" ).read() )
self.assertEqual(config["hidden_size"],7_6_8 )
def lowerCamelCase_ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : str = Path(__A ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(__A,"a.txt" ),str(__A ) )
self.assertIsNone(get_file_from_repo(__A,"b.txt" ) ) | 11 |
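# Usage sketch for the helpers exercised above (network access required; the
# repo id and filename are illustrative):
# from transformers.utils import cached_file
# path = cached_file('hf-internal-testing/tiny-random-bert', 'config.json')
# `path` points inside the local cache, e.g. models--.../snapshots/<commit>/config.json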
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : str = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
class UpperCAmelCase__ ( A ):
def __init__( self : str,__A : PriorTransformer,__A : CLIPVisionModel,__A : CLIPImageProcessor,__A : HeunDiscreteScheduler,__A : ShapERenderer,):
super().__init__()
self.register_modules(
prior=__A,image_encoder=__A,image_processor=__A,scheduler=__A,renderer=__A,)
def lowerCamelCase_ ( self : Optional[int],__A : str,__A : Tuple,__A : Any,__A : Optional[int],__A : Tuple,__A : Union[str, Any] ):
if latents is None:
_lowerCamelCase : int = randn_tensor(__A,generator=__A,device=__A,dtype=__A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCamelCase : Union[str, Any] = latents.to(__A )
_lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase_ ( self : List[str],__A : List[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : List[Any] = torch.device(f'cuda:{gpu_id}' )
_lowerCamelCase : Tuple = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__A,__A )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder,"_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__A,"_hf_hook" )
and hasattr(module._hf_hook,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase_ ( self : Tuple,__A : int,__A : Any,__A : str,__A : int,):
if isinstance(__A,__A ) and isinstance(image[0],torch.Tensor ):
_lowerCamelCase : Dict = torch.cat(__A,axis=0 ) if image[0].ndim == 4 else torch.stack(__A,axis=0 )
if not isinstance(__A,torch.Tensor ):
_lowerCamelCase : str = self.image_processor(__A,return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
_lowerCamelCase : Any = image.to(dtype=self.image_encoder.dtype,device=__A )
_lowerCamelCase : List[str] = self.image_encoder(__A )["last_hidden_state"]
_lowerCamelCase : Optional[Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_lowerCamelCase : Any = image_embeds.repeat_interleave(__A,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : int = torch.zeros_like(__A )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCamelCase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(__A )
def __call__( self : Tuple,__A : Union[PIL.Image.Image, List[PIL.Image.Image]],__A : int = 1,__A : int = 2_5,__A : Optional[Union[torch.Generator, List[torch.Generator]]] = None,__A : Optional[torch.FloatTensor] = None,__A : float = 4.0,__A : int = 6_4,__A : Optional[str] = "pil",__A : bool = True,):
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : Optional[int] = 1
elif isinstance(__A,torch.Tensor ):
_lowerCamelCase : Optional[Any] = image.shape[0]
elif isinstance(__A,__A ) and isinstance(image[0],(torch.Tensor, PIL.Image.Image) ):
_lowerCamelCase : str = len(__A )
else:
raise ValueError(
f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__A )}' )
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : int = batch_size * num_images_per_prompt
_lowerCamelCase : Union[str, Any] = guidance_scale > 1.0
_lowerCamelCase : Tuple = self._encode_image(__A,__A,__A,__A )
# prior
self.scheduler.set_timesteps(__A,device=__A )
_lowerCamelCase : List[Any] = self.scheduler.timesteps
_lowerCamelCase : Dict = self.prior.config.num_embeddings
_lowerCamelCase : Tuple = self.prior.config.embedding_dim
_lowerCamelCase : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim),image_embeds.dtype,__A,__A,__A,self.scheduler,)
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_lowerCamelCase : Tuple = latents.reshape(latents.shape[0],__A,__A )
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : Tuple = self.scheduler.scale_model_input(__A,__A )
_lowerCamelCase : List[str] = self.prior(
__A,timestep=__A,proj_embedding=__A,).predicted_image_embedding
# remove the variance
_lowerCamelCase , _lowerCamelCase : Optional[Any] = noise_pred.split(
scaled_model_input.shape[2],dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_lowerCamelCase , _lowerCamelCase : int = noise_pred.chunk(2 )
_lowerCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
_lowerCamelCase : Any = self.scheduler.step(
__A,timestep=__A,sample=__A,).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=__A )
_lowerCamelCase : Any = []
for i, latent in enumerate(__A ):
_lowerCamelCase : int = self.renderer.decode(
latent[None, :],__A,size=__A,ray_batch_size=4_0_9_6,n_coarse_samples=6_4,n_fine_samples=1_2_8,)
images.append(__A )
_lowerCamelCase : List[Any] = torch.stack(__A )
if output_type not in ["np", "pil"]:
raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
_lowerCamelCase : Union[str, Any] = images.cpu().numpy()
if output_type == "pil":
_lowerCamelCase : List[str] = [self.numpy_to_pil(__A ) for image in images]
# Offload last model to CPU
if hasattr(self,"final_offload_hook" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
        return ShapEPipelineOutput(images=__A )
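# The guidance branch above implements classifier-free guidance. A minimal
# tensor-level sketch of that single step (torch only; shapes illustrative --
# the batch stacks [unconditional, conditional] predictions along dim 0):
import torch

def cfg_step(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    uncond, cond = noise_pred.chunk(2)  # split the doubled batch
    return uncond + guidance_scale * (cond - uncond)

assert cfg_step(torch.randn(2, 4, 8), guidance_scale=3.0).shape == (1, 4, 8)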
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
UpperCAmelCase_ : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCAmelCase_ : int = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCAmelCase_ : Optional[int] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
UpperCAmelCase_ : Union[str, Any] = OrderedDict(
[
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
UpperCAmelCase_ : str = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
UpperCAmelCase_ : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
UpperCAmelCase_ : int = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
UpperCAmelCase_ : Tuple = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
UpperCAmelCase_ : List[str] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
UpperCAmelCase_ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
UpperCAmelCase_ : str = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
UpperCAmelCase_ : Optional[Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
UpperCAmelCase_ : Tuple = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModel)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ : int = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ : Any = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : List[str] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ : List[Any] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ : Optional[Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ : int = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Tuple = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ : Dict = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
lowerCAmelCase_ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ : Union[str, Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
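# Usage sketch for the auto classes above (downloads weights, so network
# access is required; the checkpoint name is illustrative):
# from transformers import FlaxAutoModel
# model = FlaxAutoModel.from_pretrained('bert-base-cased')  # resolved to FlaxBertModel via FLAX_MODEL_MAPPING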
'''simple docstring'''
import random
from typing import Any
def A_ ( _lowerCAmelCase : list ):
"""simple docstring"""
for _ in range(len(_lowerCAmelCase ) ):
_lowerCamelCase : Any = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase : List[str] = random.randint(0 , len(_lowerCAmelCase ) - 1 )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCAmelCase_ : Any = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase_ : Dict = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
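    # The loop above swaps random pairs; the classic Fisher-Yates pass below
    # does produce a uniform permutation (a sketch, equivalent to random.shuffle):
    def fisher_yates_classic(data: list) -> list:
        for i in range(len(data) - 1, 0, -1):
            j = random.randint(0, i)  # pick a partner from the unshuffled prefix
            data[i], data[j] = data[j], data[i]
        return data

    print('Classic FY Shuffle', fisher_yates_classic(list(range(8))))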
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
UpperCAmelCase_ : Tuple = get_logger()
UpperCAmelCase_ : Optional[dict] = None
class UpperCAmelCase__ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : Tuple,__A : Optional[Any]=None,__A : Optional[Any]=None,**__A : Optional[int] ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A,__A ):
raise ValueError(
f'Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
_lowerCamelCase : List[str] = device if isinstance(__A,__A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCamelCase : Tuple = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'Device with string identifier {self.device} not listed among the available '
f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
f'device: {str(jax.devices()[0] )}.' )
_lowerCamelCase : Optional[int] = str(jax.devices()[0] )
_lowerCamelCase : Tuple = jnp_array_kwargs
@staticmethod
def lowerCamelCase_ ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def lowerCamelCase_ ( self : Union[str, Any],__A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A,__A ) and column:
if all(
isinstance(__A,jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A,axis=0 )
return column
def lowerCamelCase_ ( self : Dict,__A : Dict ):
import jax
import jax.numpy as jnp
if isinstance(__A,(str, bytes, type(__A )) ):
return value
elif isinstance(__A,(np.character, np.ndarray) ) and np.issubdtype(value.dtype,np.character ):
return value.tolist()
_lowerCamelCase : List[Any] = {}
if isinstance(__A,(np.number, np.ndarray) ) and np.issubdtype(value.dtype,np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_lowerCamelCase : Union[str, Any] = {"dtype": jnp.intaa}
else:
_lowerCamelCase : Optional[Any] = {"dtype": jnp.intaa}
elif isinstance(__A,(np.number, np.ndarray) ) and np.issubdtype(value.dtype,np.floating ):
_lowerCamelCase : List[Any] = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A,PIL.Image.Image ):
_lowerCamelCase : int = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_lowerCamelCase : str = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A,**{**default_dtype, **self.jnp_array_kwargs} )
def lowerCamelCase_ ( self : Tuple,__A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A,torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A,"__array__" ) and not isinstance(__A,jax.Array ):
_lowerCamelCase : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A,np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A,(list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def lowerCamelCase_ ( self : Tuple,__A : dict ):
return map_nested(self._recursive_tensorize,__A,map_list=__A )
def lowerCamelCase_ ( self : Union[str, Any],__A : pa.Table ):
_lowerCamelCase : Dict = self.numpy_arrow_extractor().extract_row(__A )
_lowerCamelCase : str = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def lowerCamelCase_ ( self : Any,__A : pa.Table ):
_lowerCamelCase : Optional[int] = self.numpy_arrow_extractor().extract_column(__A )
_lowerCamelCase : Dict = self.python_features_decoder.decode_column(__A,pa_table.column_names[0] )
_lowerCamelCase : Optional[Any] = self.recursive_tensorize(__A )
_lowerCamelCase : Tuple = self._consolidate(__A )
return column
def lowerCamelCase_ ( self : Optional[int],__A : pa.Table ):
_lowerCamelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_batch(__A )
_lowerCamelCase : Dict = self.python_features_decoder.decode_batch(__A )
_lowerCamelCase : int = self.recursive_tensorize(__A )
for column_name in batch:
_lowerCamelCase : Optional[int] = self._consolidate(batch[column_name] )
return batch | 11 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any],__A : Dict,__A : List[str]=1_3,__A : Any=7,__A : str=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : Any=True,__A : List[str]=9_9,__A : str=3_2,__A : List[str]=5,__A : Optional[Any]=4,__A : Any=3_7,__A : Optional[Any]="gelu",__A : List[Any]=0.1,__A : Any=0.1,__A : Dict=5_1_2,__A : Tuple=1_6,__A : Tuple=2,__A : List[Any]=0.02,__A : Any=4,):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : Optional[int] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : Tuple = is_training
_lowerCamelCase : Union[str, Any] = use_attention_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Union[str, Any] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[Any] = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : Union[str, Any] = type_sequence_label_size
_lowerCamelCase : str = initializer_range
_lowerCamelCase : List[Any] = num_choices
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_attention_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : List[str] = None
if self.use_token_type_ids:
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str = config_and_inputs
_lowerCamelCase : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = config_and_inputs
_lowerCamelCase : int = True
_lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = True
lowerCAmelCase_ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : List[str] = FlaxRobertaModelTester(self )
@slow
def lowerCamelCase_ ( self : Any ):
for model_class_name in self.all_model_classes:
            _lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("roberta-base",from_pt=True )
_lowerCamelCase : Union[str, Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs ) | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] ):
"""simple docstring"""
if not nums:
return 0
_lowerCamelCase : List[Any] = nums[0]
_lowerCamelCase : Tuple = 0
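    # house-robber style DP: one accumulator is the best sum that includes the
    # most recent element, the other the best sum that excludes it; e.g. for
    # [1, 2, 3] the result is 1 + 3 = 4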
for num in nums[1:]:
_lowerCamelCase , _lowerCamelCase : Optional[int] = (
max_excluding + num,
            max(max_including , max_excluding ),
)
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod() | 11 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : Union[str, Any] = {
'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = BlenderbotSmallTokenizer
def __init__( self : Optional[Any],__A : str=None,__A : str=None,__A : int="<|endoftext|>",__A : List[str]="<|endoftext|>",__A : Optional[Any]="<|endoftext|>",__A : Union[str, Any]=False,__A : List[Any]=True,**__A : str,):
super().__init__(
ByteLevelBPETokenizer(
vocab=__A,merges=__A,add_prefix_space=__A,trim_offsets=__A,),bos_token=__A,eos_token=__A,unk_token=__A,**__A,)
_lowerCamelCase : List[Any] = add_prefix_space
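    # special-token layout built by the methods below: a single sequence becomes
    # [bos] A [eos], while a pair becomes [bos] A [eos] [eos] B [eos]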
def lowerCamelCase_ ( self : Any,__A : Optional[Any],__A : Optional[int]=None ):
_lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[Any],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Any = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class UpperCAmelCase__ :
def __init__( self : Optional[int],__A : Optional[Any],__A : Any=1_3,__A : int=7,__A : Any=True,__A : Optional[Any]=True,__A : Any=True,__A : Any=True,__A : Tuple=9_9,__A : Optional[int]=3_2,__A : Any=2,__A : str=4,__A : Optional[Any]=3_7,__A : int="gelu",__A : List[str]=0.1,__A : List[Any]=0.1,__A : str=5_1_2,__A : str=1_6,__A : Any=2,__A : int=0.02,__A : Union[str, Any]=3,__A : Tuple=4,__A : str=None,__A : str=0,):
_lowerCamelCase : Dict = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : str = seq_length
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : Optional[Any] = use_input_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Optional[int] = intermediate_size
_lowerCamelCase : str = hidden_act
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : str = max_position_embeddings
_lowerCamelCase : Union[str, Any] = type_vocab_size
_lowerCamelCase : List[str] = type_sequence_label_size
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Any = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : List[Any] = scope
_lowerCamelCase : Optional[int] = projection_dim
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Dict = None
if self.use_token_type_ids:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Optional[Any] = None
if self.use_labels:
_lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : Tuple = BertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=__A,initializer_range=self.initializer_range,)
_lowerCamelCase : Union[str, Any] = DPRConfig(projection_dim=self.projection_dim,**config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : List[str],__A : Any,__A : int,__A : Any,__A : Dict,__A : int,__A : Tuple,__A : Union[str, Any] ):
_lowerCamelCase : Tuple = TFDPRContextEncoder(config=__A )
_lowerCamelCase : Any = model(__A,attention_mask=__A,token_type_ids=__A )
_lowerCamelCase : Dict = model(__A,token_type_ids=__A )
_lowerCamelCase : Any = model(__A )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase_ ( self : Dict,__A : Union[str, Any],__A : List[str],__A : Any,__A : Optional[Any],__A : Any,__A : str,__A : Tuple ):
_lowerCamelCase : Any = TFDPRQuestionEncoder(config=__A )
_lowerCamelCase : Optional[Any] = model(__A,attention_mask=__A,token_type_ids=__A )
_lowerCamelCase : Tuple = model(__A,token_type_ids=__A )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.projection_dim or self.hidden_size) )
def lowerCamelCase_ ( self : int,__A : Optional[Any],__A : Tuple,__A : Optional[Any],__A : Optional[Any],__A : Any,__A : Optional[Any],__A : Any ):
_lowerCamelCase : Optional[int] = TFDPRReader(config=__A )
_lowerCamelCase : int = model(__A,attention_mask=__A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape,(self.batch_size,) )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = config_and_inputs
_lowerCamelCase : Optional[Any] = {"input_ids": input_ids}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Dict = TFDPRModelTester(self )
_lowerCamelCase : Any = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : str ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__A )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__A )
@slow
def lowerCamelCase_ ( self : Any ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : str = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Optional[int] = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : str = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Tuple = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
_lowerCamelCase : Optional[int] = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
_lowerCamelCase : Optional[Any] = model(__A )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_lowerCamelCase : Tuple = tf.constant(
[
[
0.03236253,
0.12753335,
0.16818509,
0.00279786,
0.3896933,
0.24264945,
0.2178971,
-0.02335227,
-0.08481959,
-0.14324117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy(),expected_slice.numpy(),atol=1e-4 ) ) | 11 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
random.seed(_lowerCAmelCase )
np.random.seed(_lowerCAmelCase )
torch.manual_seed(_lowerCAmelCase )
torch.cuda.manual_seed_all(_lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class UpperCAmelCase__ :
def __init__( self : List[str],__A : Iterable[torch.nn.Parameter],__A : float = 0.9999,__A : float = 0.0,__A : int = 0,__A : bool = False,__A : Union[float, int] = 1.0,__A : Union[float, int] = 2 / 3,__A : Optional[Any] = None,__A : Dict[str, Any] = None,**__A : Optional[Any],):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Any = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Dict = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_lowerCamelCase : Optional[Any] = True
if kwargs.get("max_value",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Optional[Any] = kwargs["max_value"]
if kwargs.get("min_value",__A ) is not None:
_lowerCamelCase : Tuple = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",__A,standard_warn=__A )
_lowerCamelCase : Any = kwargs["min_value"]
_lowerCamelCase : Optional[int] = list(__A )
_lowerCamelCase : List[Any] = [p.clone().detach() for p in parameters]
if kwargs.get("device",__A ) is not None:
_lowerCamelCase : Optional[int] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",__A,standard_warn=__A )
self.to(device=kwargs["device"] )
_lowerCamelCase : Tuple = None
_lowerCamelCase : Dict = decay
_lowerCamelCase : List[Any] = min_decay
_lowerCamelCase : Optional[Any] = update_after_step
_lowerCamelCase : Any = use_ema_warmup
_lowerCamelCase : Union[str, Any] = inv_gamma
_lowerCamelCase : str = power
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Union[str, Any] = None # set in `step()`
_lowerCamelCase : List[str] = model_cls
_lowerCamelCase : Dict = model_config
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any],__A : List[str],__A : Optional[int] ):
_lowerCamelCase , _lowerCamelCase : Optional[int] = model_cls.load_config(__A,return_unused_kwargs=__A )
_lowerCamelCase : Optional[Any] = model_cls.from_pretrained(__A )
_lowerCamelCase : Union[str, Any] = cls(model.parameters(),model_cls=__A,model_config=model.config )
ema_model.load_state_dict(__A )
return ema_model
def lowerCamelCase_ ( self : str,__A : int ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_lowerCamelCase : Tuple = self.model_cls.from_config(self.model_config )
_lowerCamelCase : List[str] = self.state_dict()
state_dict.pop("shadow_params",__A )
model.register_to_config(**__A )
self.copy_to(model.parameters() )
model.save_pretrained(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
_lowerCamelCase : str = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
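        # with the constructor defaults (inv_gamma=1.0, power=2/3) the warmup
        # schedule below crosses a decay of ~0.999 around step 31_600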
if self.use_ema_warmup:
_lowerCamelCase : Tuple = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_lowerCamelCase : List[str] = (1 + step) / (1_0 + step)
_lowerCamelCase : Union[str, Any] = min(__A,self.decay )
# make sure decay is not smaller than min_decay
_lowerCamelCase : Union[str, Any] = max(__A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCamelCase_ ( self : Any,__A : Iterable[torch.nn.Parameter] ):
if isinstance(__A,torch.nn.Module ):
_lowerCamelCase : Dict = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",__A,standard_warn=__A,)
_lowerCamelCase : Any = parameters.parameters()
_lowerCamelCase : str = list(__A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_lowerCamelCase : Dict = self.get_decay(self.optimization_step )
_lowerCamelCase : Optional[Any] = decay
_lowerCamelCase : List[Any] = 1 - decay
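        # the in-place update further below computes
        # shadow - (1 - decay) * (shadow - param), which is algebraically
        # decay * shadow + (1 - decay) * param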
_lowerCamelCase : List[Any] = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,__A ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                _lowerCamelCase : List[Any] = deepspeed.zero.GatheredParameters(param,modifier_rank=None )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(__A )
def lowerCamelCase_ ( self : Dict,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = list(__A )
for s_param, param in zip(self.shadow_params,__A ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCamelCase_ ( self : List[str],__A : Dict=None,__A : Any=None ):
_lowerCamelCase : int = [
p.to(device=__A,dtype=__A ) if p.is_floating_point() else p.to(device=__A )
for p in self.shadow_params
]
def lowerCamelCase_ ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCamelCase_ ( self : Tuple,__A : Iterable[torch.nn.Parameter] ):
_lowerCamelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCamelCase_ ( self : int,__A : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,__A ):
param.data.copy_(c_param.data )
# Better memory-wise.
_lowerCamelCase : List[str] = None
def lowerCamelCase_ ( self : Dict,__A : dict ):
_lowerCamelCase : List[str] = copy.deepcopy(__A )
_lowerCamelCase : Optional[Any] = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_lowerCamelCase : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,__A ):
raise ValueError("Invalid min_decay" )
_lowerCamelCase : Tuple = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,__A ):
raise ValueError("Invalid optimization_step" )
_lowerCamelCase : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,__A ):
raise ValueError("Invalid update_after_step" )
_lowerCamelCase : List[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,__A ):
raise ValueError("Invalid use_ema_warmup" )
_lowerCamelCase : Tuple = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
_lowerCamelCase : Union[str, Any] = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
_lowerCamelCase : Optional[Any] = state_dict.get("shadow_params",__A )
if shadow_params is not None:
_lowerCamelCase : str = shadow_params
if not isinstance(self.shadow_params,__A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(__A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" ) | 11 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
UpperCAmelCase_ : Tuple = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCAmelCase__ :
def __init__( self : Tuple,__A : List[str],__A : Dict=1_6,__A : Tuple=1_3,__A : Any=7,__A : Tuple=1_4,__A : Optional[Any]=1_0,__A : int=1_9,__A : int=5,__A : Union[str, Any]=4,__A : Optional[int]=True,__A : List[Any]=1_6,__A : Optional[Any]=2,__A : Optional[int]=4,__A : Union[str, Any]=4,__A : List[Any]="gelu",__A : Union[str, Any]=0.1,__A : Any=0.1,__A : List[str]=[1, 2, 3, 4, 5],__A : Tuple=2_5,__A : List[Any]=5,):
_lowerCamelCase : Optional[int] = d_model
_lowerCamelCase : List[str] = parent
_lowerCamelCase : int = batch_size
_lowerCamelCase : Union[str, Any] = prediction_length
_lowerCamelCase : Optional[Any] = context_length
_lowerCamelCase : Tuple = cardinality
_lowerCamelCase : Any = num_time_features
_lowerCamelCase : str = lags_sequence
_lowerCamelCase : List[Any] = embedding_dimension
_lowerCamelCase : Any = is_training
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : int = num_hidden_layers
_lowerCamelCase : List[str] = num_attention_heads
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = context_length
_lowerCamelCase : Union[str, Any] = prediction_length + label_length
_lowerCamelCase : str = label_length
_lowerCamelCase : List[Any] = moving_average
_lowerCamelCase : Optional[Any] = autocorrelation_factor
def lowerCamelCase_ ( self : Tuple ):
return AutoformerConfig(
d_model=self.d_model,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,prediction_length=self.prediction_length,context_length=self.context_length,label_length=self.label_length,lags_sequence=self.lags_sequence,num_time_features=self.num_time_features,num_static_categorical_features=1,cardinality=[self.cardinality],embedding_dimension=[self.embedding_dimension],moving_average=self.moving_average,)
def lowerCamelCase_ ( self : Optional[Any],__A : int ):
_lowerCamelCase : List[str] = config.context_length + max(config.lags_sequence )
_lowerCamelCase : str = ids_tensor([self.batch_size, 1],config.cardinality[0] )
_lowerCamelCase : Dict = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_lowerCamelCase : int = floats_tensor([self.batch_size, _past_length] )
_lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_lowerCamelCase : int = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, config.prediction_length] )
_lowerCamelCase : Optional[int] = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Union[str, Any] = self.get_config()
_lowerCamelCase : Union[str, Any] = self.prepare_autoformer_inputs_dict(__A )
return config, inputs_dict
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase_ ( self : str,__A : Any,__A : Union[str, Any] ):
_lowerCamelCase : Any = AutoformerModel(config=__A ).to(__A ).eval()
_lowerCamelCase : Optional[Any] = model(**__A )
_lowerCamelCase : Optional[Any] = outputs.encoder_last_hidden_state
_lowerCamelCase : int = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : List[Any] = model.get_encoder()
encoder.save_pretrained(__A )
_lowerCamelCase : List[Any] = AutoformerEncoder.from_pretrained(__A ).to(__A )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = model.create_network_inputs(**__A )
_lowerCamelCase , _lowerCamelCase : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
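        # the decomposition layer splits the context window into a seasonal and a
        # trend component; both are then concatenated with the extra features below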
_lowerCamelCase : int = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),dim=-1,)
_lowerCamelCase : Tuple = encoder(inputs_embeds=__A )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_lowerCamelCase : List[str] = (
torch.mean(transformer_inputs[:, : config.context_length, ...],dim=1 )
.unsqueeze(1 )
.repeat(1,config.prediction_length,1 )
)
_lowerCamelCase : Union[str, Any] = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],device=enc_input.device,)
_lowerCamelCase : Dict = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros),dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
),dim=-1,)
_lowerCamelCase : Tuple = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean),dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
),dim=-1,)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase : int = model.get_decoder()
decoder.save_pretrained(__A )
_lowerCamelCase : Optional[Any] = AutoformerDecoder.from_pretrained(__A ).to(__A )
_lowerCamelCase : List[Any] = decoder(
trend=__A,inputs_embeds=__A,encoder_hidden_states=__A,)[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCAmelCase_ = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCAmelCase_ = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : int = AutoformerModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self,config_class=__A,has_text_modality=__A )
def lowerCamelCase_ ( self : int ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_lowerCamelCase : Union[str, Any] = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
_lowerCamelCase , _lowerCamelCase : str = model_class.from_pretrained(__A,output_loading_info=__A )
self.assertEqual(info["missing_keys"],[] )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCamelCase_ ( self : List[str] ):
pass
def lowerCamelCase_ ( self : Tuple ):
        _lowerCamelCase : str = inspect.signature(getattr(AutoformerModel,"forward" ) )
# The main input is the name of the argument after `self`
_lowerCamelCase : int = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name,__A )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase , _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Any = model_class(__A )
_lowerCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : str = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(__A )],__A )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase , _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[str] = True
_lowerCamelCase : Optional[int] = getattr(self.model_tester,"seq_length",__A )
_lowerCamelCase : List[str] = getattr(self.model_tester,"decoder_seq_length",__A )
_lowerCamelCase : List[str] = getattr(self.model_tester,"encoder_seq_length",__A )
_lowerCamelCase : Dict = getattr(self.model_tester,"d_model",__A )
_lowerCamelCase : List[str] = getattr(self.model_tester,"num_attention_heads",__A )
_lowerCamelCase : Optional[Any] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = True
_lowerCamelCase : Any = False
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : str = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__A ),self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCamelCase : List[Any] = True
_lowerCamelCase : Any = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(__A,__A ) )
_lowerCamelCase : int = outputs.encoder_attentions
self.assertEqual(len(__A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads, encoder_seq_length, dim],)
_lowerCamelCase : str = len(__A )
_lowerCamelCase : Optional[Any] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__A,__A )
# decoder attentions
_lowerCamelCase : Optional[Any] = outputs.decoder_attentions
self.assertIsInstance(__A,(list, tuple) )
self.assertEqual(len(__A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads, decoder_seq_length, dim],)
# cross attentions
_lowerCamelCase : int = outputs.cross_attentions
self.assertIsInstance(__A,(list, tuple) )
self.assertEqual(len(__A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads, decoder_seq_length, dim],)
# Check attention is always last and order is fine
_lowerCamelCase : Dict = True
_lowerCamelCase : Union[str, Any] = True
_lowerCamelCase : List[Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**self._prepare_for_class(__A,__A ) )
self.assertEqual(out_len + 2,len(__A ) )
_lowerCamelCase : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads, encoder_seq_length, dim],)
@is_flaky()
def lowerCamelCase_ ( self : Optional[int] ):
super().test_retain_grad_hidden_states_attentions()
def A_ ( _lowerCAmelCase : List[str]="train-batch.pt" ):
"""simple docstring"""
_lowerCamelCase : Dict = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_lowerCAmelCase , repo_type="dataset" )
    _lowerCamelCase : Optional[int] = torch.load(_lowerCAmelCase , map_location=torch_device )
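    # map_location places the saved tensors on the active test device at load time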
return batch
@require_torch
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : Dict = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__A )
_lowerCamelCase : Tuple = prepare_batch()
with torch.no_grad():
_lowerCamelCase : List[Any] = model(
past_values=batch["past_values"],past_time_features=batch["past_time_features"],past_observed_mask=batch["past_observed_mask"],static_categorical_features=batch["static_categorical_features"],future_values=batch["future_values"],future_time_features=batch["future_time_features"],)[0]
_lowerCamelCase : List[Any] = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape,__A )
_lowerCamelCase : List[str] = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]],device=__A )
self.assertTrue(torch.allclose(output[0, :3, :3],__A,atol=__A ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Union[str, Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__A )
_lowerCamelCase : Dict = prepare_batch("val-batch.pt" )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(
past_values=batch["past_values"],past_time_features=batch["past_time_features"],past_observed_mask=batch["past_observed_mask"],static_categorical_features=batch["static_categorical_features"],).encoder_last_hidden_state
_lowerCamelCase : Optional[int] = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape,__A )
_lowerCamelCase : Optional[int] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]],device=__A )
self.assertTrue(torch.allclose(output[0, :3, :3],__A,atol=__A ) )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__A )
_lowerCamelCase : Any = prepare_batch("val-batch.pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model.generate(
static_categorical_features=batch["static_categorical_features"],past_time_features=batch["past_time_features"],past_values=batch["past_values"],future_time_features=batch["future_time_features"],past_observed_mask=batch["past_observed_mask"],)
_lowerCamelCase : Optional[Any] = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape,__A )
_lowerCamelCase : Any = torch.tensor([3130.6763, 4056.5293, 7053.0786],device=__A )
_lowerCamelCase : Union[str, Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:],__A,rtol=1e-1 ) ) | 11 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : str = torch.load(_lowerCAmelCase , map_location="cpu" )
if "model" in sd.keys():
_lowerCamelCase : Optional[Any] = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
# pop unnecessary weights
_lowerCamelCase : Optional[Any] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
_lowerCamelCase : Optional[Any] = sd.pop(_lowerCAmelCase )
_lowerCamelCase : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
_lowerCamelCase : Optional[Any] = sd[key]
            # We split the fused QKV weight into separate Q, K, V matrices
_lowerCamelCase : int = key.replace(".qkv_proj." , ".q_proj." )
_lowerCamelCase : List[Any] = key.replace(".qkv_proj." , ".k_proj." )
_lowerCamelCase : Dict = key.replace(".qkv_proj." , ".v_proj." )
_lowerCamelCase : Any = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` stores the fused QKV weight in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
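            # torch.split along dim 0 with chunk size depth // 3 returns the three
            # blocks in storage order, i.e. K, V, Q for this checkpoint layout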
            _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = torch.split(value , depth // 3 , dim=0 )
_lowerCamelCase : str = q
_lowerCamelCase : Dict = k
_lowerCamelCase : int = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str]=None ):
"""simple docstring"""
_lowerCamelCase : Any = load_checkpoint(_lowerCAmelCase )
if config is not None:
_lowerCamelCase : str = OPTConfig.from_pretrained(_lowerCAmelCase )
else:
_lowerCamelCase : Any = OPTConfig()
_lowerCamelCase : Optional[int] = OPTModel(_lowerCAmelCase ).half().eval()
model.load_state_dict(_lowerCAmelCase )
# Check results
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
UpperCAmelCase_ : str = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config) | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : Tuple = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = ['YolosFeatureExtractor']
UpperCAmelCase_ : int = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if "img_encoder.pos_embed" in name:
_lowerCamelCase : List[str] = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Optional[int] = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
_lowerCamelCase : Optional[Any] = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
_lowerCamelCase : List[Any] = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : str = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Tuple = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : Union[str, Any] = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
_lowerCamelCase : Optional[Any] = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[str] = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Any = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
_lowerCamelCase : str = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
_lowerCamelCase : Optional[int] = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
_lowerCamelCase : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
_lowerCamelCase : Tuple = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
_lowerCamelCase : Optional[Any] = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : int = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
_lowerCamelCase : Tuple = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : Optional[Any] = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
_lowerCamelCase : str = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
        _lowerCamelCase : Optional[int] = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Any = key.split("." )
_lowerCamelCase , _lowerCamelCase : List[str] = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : List[Any] = config.vision_config.hidden_size
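            # the fused qkv matrix stacks query, key and value along dim 0, so the
            # row blocks [0:dim), [dim:2*dim) and [2*dim:3*dim) are sliced off below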
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : int = val[dim : dim * 2, :]
_lowerCamelCase : str = val[-dim:, :]
else:
_lowerCamelCase : Optional[int] = val[:dim]
_lowerCamelCase : str = val[dim : dim * 2]
_lowerCamelCase : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[Any] = key.split("." )
_lowerCamelCase : int = int(key_split[3] )
_lowerCamelCase : Tuple = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : str = val[
dim : dim * 2, :
]
_lowerCamelCase : Optional[int] = val[-dim:, :]
else:
_lowerCamelCase : str = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Optional[int] = val[-dim:]
else:
            _lowerCamelCase : int = rename_key(key )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : Any = val.squeeze_()
else:
_lowerCamelCase : Union[str, Any] = val
return orig_state_dict
def A_ ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str="groupvit-gcc-yfcc" , _lowerCAmelCase : List[str]=False ):
"""simple docstring"""
_lowerCamelCase : List[str] = GroupViTConfig()
_lowerCamelCase : Union[str, Any] = GroupViTModel(_lowerCAmelCase ).eval()
_lowerCamelCase : Any = torch.load(_lowerCAmelCase , map_location="cpu" )["model"]
_lowerCamelCase : Union[str, Any] = convert_state_dict(_lowerCAmelCase , _lowerCAmelCase )
_lowerCamelCase , _lowerCamelCase : Tuple = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(_lowerCAmelCase ) == 0)
# verify result
_lowerCamelCase : List[Any] = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Any = processor(text=["a photo of a cat", "a photo of a dog"] , images=_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
with torch.no_grad():
_lowerCamelCase : List[str] = model(**_lowerCAmelCase )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : List[Any] = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Optional[Any] = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , _lowerCAmelCase , atol=1E-3 )
processor.save_pretrained(_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print("Successfully saved processor and model to" , _lowerCAmelCase )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase , organization="nielsr" )
model.push_to_hub(_lowerCAmelCase , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
    default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 11 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : str = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'umt5'
lowerCAmelCase_ = ['past_key_values']
def __init__( self : int,__A : List[Any]=2_5_0_1_1_2,__A : Optional[int]=5_1_2,__A : Tuple=6_4,__A : Optional[int]=1_0_2_4,__A : Optional[Any]=8,__A : Tuple=None,__A : int=6,__A : List[str]=3_2,__A : List[Any]=1_2_8,__A : Union[str, Any]=0.1,__A : Optional[int]=1e-6,__A : List[Any]=1.0,__A : Dict="gated-gelu",__A : List[Any]=True,__A : Dict=True,__A : List[Any]="T5Tokenizer",__A : int=True,__A : Optional[Any]=0,__A : Any=1,__A : Optional[Any]=0,**__A : Tuple,):
super().__init__(
is_encoder_decoder=__A,tokenizer_class=__A,tie_word_embeddings=__A,pad_token_id=__A,eos_token_id=__A,decoder_start_token_id=__A,**__A,)
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Tuple = d_model
_lowerCamelCase : Any = d_kv
_lowerCamelCase : Dict = d_ff
_lowerCamelCase : Optional[int] = num_layers
_lowerCamelCase : Any = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowerCamelCase : List[Any] = num_heads
_lowerCamelCase : Optional[int] = relative_attention_num_buckets
_lowerCamelCase : Tuple = relative_attention_max_distance
_lowerCamelCase : List[str] = dropout_rate
_lowerCamelCase : Union[str, Any] = layer_norm_epsilon
_lowerCamelCase : Optional[int] = initializer_factor
_lowerCamelCase : Optional[int] = feed_forward_proj
_lowerCamelCase : Optional[int] = use_cache
_lowerCamelCase : int = self.feed_forward_proj.split("-" )
_lowerCamelCase : Optional[int] = act_info[-1]
_lowerCamelCase : List[Any] = act_info[0] == "gated"
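        # e.g. "gated-gelu" splits into the activation "gelu" with the gated flag
        # set, while a plain "relu" yields "relu" with the flag unset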
if len(__A ) > 1 and act_info[0] != "gated" or len(__A ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
_lowerCamelCase : str = "gelu_new"
@property
def lowerCamelCase_ ( self : int ):
return self.d_model
@property
def lowerCamelCase_ ( self : int ):
return self.num_heads
@property
def lowerCamelCase_ ( self : Optional[int] ):
return self.num_layers
class UpperCAmelCase__ ( A ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[str] = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
_lowerCamelCase : Optional[Any] = "past_encoder_sequence + sequence"
_lowerCamelCase : Union[str, Any] = {0: "batch"}
_lowerCamelCase : List[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_lowerCamelCase : int = {0: "batch", 1: "decoder_sequence"}
_lowerCamelCase : List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__A,direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowerCamelCase_ ( self : str ):
return 1_3
@property
def lowerCamelCase_ ( self : int ):
return 5e-4 | 11 |
'''simple docstring'''
from __future__ import annotations
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        _lowerCamelCase , _lowerCamelCase : List[Any] = array[index2], array[index1]
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[int] = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ):
"""simple docstring"""
if length > 1:
_lowerCamelCase : Optional[Any] = int(length / 2 )
bitonic_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 1 )
bitonic_sort(_lowerCAmelCase , low + middle , _lowerCAmelCase , 0 )
bitonic_merge(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase_ : Dict = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 11 |
'''simple docstring'''
import math
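# Project Euler 207: find the smallest partition value at which the proportion of perfect partitions first drops below a given threshold.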
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if sqrt(4 * positive_integer + 1) / 2 + 1 / 2 is an exact power of two."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the smallest partition value for which the proportion of perfect partitions falls below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''') | 11 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any],__A : int,__A : int ):
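        # A uniform score distribution is a neutral baseline for exercising the logits warpers under test.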
_lowerCamelCase : Dict = jnp.ones((batch_size, length) ) / length
return scores
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = None
_lowerCamelCase : Any = 2_0
_lowerCamelCase : Optional[int] = self._get_uniform_logits(batch_size=2,length=__A )
# tweak scores to not be uniform anymore
_lowerCamelCase : Dict = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_lowerCamelCase : str = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_lowerCamelCase : Union[str, Any] = jax.nn.softmax(__A,axis=-1 )
_lowerCamelCase : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase : Dict = FlaxTemperatureLogitsWarper(temperature=1.3 )
_lowerCamelCase : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(__A,scores.copy(),cur_len=__A ),axis=-1 )
_lowerCamelCase : int = jax.nn.softmax(temp_dist_warper_smoother(__A,scores.copy(),cur_len=__A ),axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :],warped_prob_sharp[0, :],atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :],warped_prob_smooth[0, :],atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(),warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min(),warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(),warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min(),warped_prob_smooth[1, :].min() )
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Dict = None
_lowerCamelCase : Union[str, Any] = 1_0
_lowerCamelCase : List[Any] = 2
# create ramp distribution
_lowerCamelCase : int = np.broadcast_to(np.arange(__A )[None, :],(batch_size, vocab_size) ).copy()
_lowerCamelCase : str = ramp_logits[1:, : vocab_size // 2] + vocab_size
_lowerCamelCase : List[str] = FlaxTopKLogitsWarper(3 )
_lowerCamelCase : Dict = top_k_warp(__A,__A,cur_len=__A )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist(),7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist(),2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_lowerCamelCase : List[Any] = 5
_lowerCamelCase : List[str] = FlaxTopKLogitsWarper(top_k=1,filter_value=0.0,min_tokens_to_keep=3 )
_lowerCamelCase : str = np.broadcast_to(np.arange(__A )[None, :],(batch_size, length) ).copy()
_lowerCamelCase : str = top_k_warp_safety_check(__A,__A,cur_len=__A )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist(),[2, 2] )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : List[Any] = None
_lowerCamelCase : int = 1_0
_lowerCamelCase : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_lowerCamelCase : List[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_lowerCamelCase : Optional[int] = FlaxTopPLogitsWarper(0.8 )
_lowerCamelCase : List[Any] = np.exp(top_p_warp(__A,__A,cur_len=__A ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_lowerCamelCase : Optional[int] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
# check edge cases with negative and extreme logits
_lowerCamelCase : int = np.broadcast_to(np.arange(__A )[None, :],(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_lowerCamelCase : Tuple = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
_lowerCamelCase : Tuple = FlaxTopPLogitsWarper(0.9,min_tokens_to_keep=2,filter_value=0.0 )
_lowerCamelCase : Optional[Any] = top_p_warp(__A,__A,cur_len=__A )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist(),[3, 2] )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Union[str, Any] = 2_0
_lowerCamelCase : Optional[Any] = 4
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Any = FlaxMinLengthLogitsProcessor(min_length=1_0,eos_token_id=__A )
# check that min length is applied at length 5
_lowerCamelCase : Dict = ids_tensor((batch_size, 2_0),vocab_size=2_0 )
_lowerCamelCase : int = 5
_lowerCamelCase : List[Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Dict = min_dist_processor(__A,__A,cur_len=__A )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(),4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
_lowerCamelCase : List[str] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Any = 1_5
_lowerCamelCase : List[str] = min_dist_processor(__A,__A,cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = 2_0
_lowerCamelCase : Optional[int] = 4
_lowerCamelCase : Any = 0
_lowerCamelCase : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
# check that all scores are -inf except the bos_token_id score
_lowerCamelCase : str = ids_tensor((batch_size, 1),vocab_size=2_0 )
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Union[str, Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Dict = logits_processor(__A,__A,cur_len=__A )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist(),4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_lowerCamelCase : Union[str, Any] = 3
_lowerCamelCase : Optional[Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Dict = logits_processor(__A,__A,cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : str = 2_0
_lowerCamelCase : List[str] = 4
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Any = 5
_lowerCamelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__A,eos_token_id=__A )
# check that all scores are -inf except the eos_token_id when max_length is reached
_lowerCamelCase : Optional[Any] = ids_tensor((batch_size, 4),vocab_size=2_0 )
_lowerCamelCase : Any = 4
_lowerCamelCase : Optional[Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Tuple = logits_processor(__A,__A,cur_len=__A )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist(),4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Optional[int] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Any = logits_processor(__A,__A,cur_len=__A )
self.assertFalse(jnp.isinf(__A ).any() )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : List[str] = 4
_lowerCamelCase : Any = 1_0
_lowerCamelCase : int = 1_5
_lowerCamelCase : List[Any] = 2
_lowerCamelCase : Tuple = 1
_lowerCamelCase : str = 1_5
# dummy input_ids and scores
_lowerCamelCase : Tuple = ids_tensor((batch_size, sequence_length),__A )
_lowerCamelCase : Tuple = input_ids.copy()
_lowerCamelCase : Dict = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Optional[Any] = scores.copy()
# instantiate all dist processors
_lowerCamelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase : Union[str, Any] = FlaxTopKLogitsWarper(3 )
_lowerCamelCase : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0,eos_token_id=__A )
_lowerCamelCase : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
_lowerCamelCase : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=__A,eos_token_id=__A )
_lowerCamelCase : Tuple = 1_0
# no processor list
_lowerCamelCase : str = temp_dist_warp(__A,__A,cur_len=__A )
_lowerCamelCase : List[Any] = top_k_warp(__A,__A,cur_len=__A )
_lowerCamelCase : Any = top_p_warp(__A,__A,cur_len=__A )
_lowerCamelCase : Tuple = min_dist_proc(__A,__A,cur_len=__A )
_lowerCamelCase : Optional[int] = bos_dist_proc(__A,__A,cur_len=__A )
_lowerCamelCase : Union[str, Any] = eos_dist_proc(__A,__A,cur_len=__A )
# with processor list
_lowerCamelCase : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase : List[Any] = processor(__A,__A,cur_len=__A )
# scores should be equal
self.assertTrue(jnp.allclose(__A,__A,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(),input_ids_comp.tolist() )
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Dict = 4
_lowerCamelCase : List[str] = 1_0
_lowerCamelCase : Any = 1_5
_lowerCamelCase : Dict = 2
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : Any = 1_5
# dummy input_ids and scores
_lowerCamelCase : Optional[int] = ids_tensor((batch_size, sequence_length),__A )
_lowerCamelCase : Tuple = input_ids.copy()
_lowerCamelCase : Optional[Any] = self._get_uniform_logits(__A,__A )
_lowerCamelCase : Optional[Any] = scores.copy()
# instantiate all dist processors
_lowerCamelCase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowerCamelCase : List[Any] = FlaxTopKLogitsWarper(3 )
_lowerCamelCase : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowerCamelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0,eos_token_id=__A )
_lowerCamelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__A )
_lowerCamelCase : List[str] = FlaxForcedEOSTokenLogitsProcessor(max_length=__A,eos_token_id=__A )
_lowerCamelCase : List[str] = 1_0
# no processor list
def run_no_processor_list(__A : Tuple,__A : str,__A : List[Any] ):
_lowerCamelCase : str = temp_dist_warp(__A,__A,cur_len=__A )
_lowerCamelCase : List[str] = top_k_warp(__A,__A,cur_len=__A )
_lowerCamelCase : Any = top_p_warp(__A,__A,cur_len=__A )
_lowerCamelCase : Dict = min_dist_proc(__A,__A,cur_len=__A )
_lowerCamelCase : Dict = bos_dist_proc(__A,__A,cur_len=__A )
_lowerCamelCase : int = eos_dist_proc(__A,__A,cur_len=__A )
return scores
# with processor list
def run_processor_list(__A : str,__A : int,__A : int ):
_lowerCamelCase : Tuple = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowerCamelCase : List[str] = processor(__A,__A,cur_len=__A )
return scores
_lowerCamelCase : str = jax.jit(__A )
_lowerCamelCase : Optional[Any] = jax.jit(__A )
_lowerCamelCase : Optional[Any] = jitted_run_no_processor_list(__A,__A,__A )
_lowerCamelCase : Any = jitted_run_processor_list(__A,__A,__A )
# scores should be equal
self.assertTrue(jnp.allclose(__A,__A,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist(),input_ids_comp.tolist() ) | 11 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCAmelCase__ ( A ):
def __init__( self : int,__A : Any=None,**__A : Optional[Any] ):
warnings.warn(
"`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
"instead.",__A,)
super().__init__(args=__A,**__A ) | 11 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCAmelCase_ : str = random.Random()
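# A module-level RNG intended to be shared by the synthetic-input helper below (referenced as global_rng).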
if is_torch_available():
import torch
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int=1.0 , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=None ):
"""simple docstring"""
if rng is None:
_lowerCamelCase : List[str] = global_rng
_lowerCamelCase : Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[int],__A : Tuple,__A : List[str]=7,__A : Any=4_0_0,__A : Tuple=2_0_0_0,__A : Union[str, Any]=1,__A : Union[str, Any]=0.0,__A : Union[str, Any]=1_6_0_0_0,__A : int=True,__A : Optional[int]=True,):
_lowerCamelCase : List[str] = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : Dict = min_seq_length
_lowerCamelCase : Optional[int] = max_seq_length
_lowerCamelCase : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCamelCase : str = feature_size
_lowerCamelCase : List[str] = padding_value
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Optional[Any] = return_attention_mask
_lowerCamelCase : Any = do_normalize
def lowerCamelCase_ ( self : Any ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase_ ( self : Any,__A : str=False,__A : Any=False ):
def _flatten(__A : Union[str, Any] ):
return list(itertools.chain(*__A ) )
if equal_length:
_lowerCamelCase : Optional[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCamelCase : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length,self.max_seq_length,self.seq_length_diff )
]
if numpify:
_lowerCamelCase : Optional[int] = [np.asarray(__A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = ASTFeatureExtractor
def lowerCamelCase_ ( self : List[str] ):
_lowerCamelCase : List[str] = ASTFeatureExtractionTester(self )
def lowerCamelCase_ ( self : Tuple ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_0_0,1_4_0_0,2_0_0 )]
_lowerCamelCase : str = [np.asarray(__A ) for speech_input in speech_inputs]
# Test not batched input
_lowerCamelCase : Any = feat_extract(speech_inputs[0],return_tensors="np" ).input_values
_lowerCamelCase : List[Any] = feat_extract(np_speech_inputs[0],return_tensors="np" ).input_values
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
# Test batched
_lowerCamelCase : Dict = feat_extract(__A,padding=__A,return_tensors="np" ).input_values
_lowerCamelCase : str = feat_extract(__A,padding=__A,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__A,__A ):
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
_lowerCamelCase : int = np.asarray(__A )
_lowerCamelCase : Dict = feat_extract(__A,return_tensors="np" ).input_values
_lowerCamelCase : Optional[int] = feat_extract(__A,return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(__A,__A ):
self.assertTrue(np.allclose(__A,__A,atol=1e-3 ) )
@require_torch
def lowerCamelCase_ ( self : int ):
import torch
_lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _lowerCamelCase : List[str] = np.random.rand(1_0_0 ).astype(np.float32 )
_lowerCamelCase : Optional[int] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCamelCase : int = feature_extractor.pad([{"input_values": inputs}],return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
_lowerCamelCase : Tuple = feature_extractor.pad([{"input_values": inputs}],return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def lowerCamelCase_ ( self : Optional[int],__A : Optional[int] ):
from datasets import load_dataset
_lowerCamelCase : Optional[Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy","clean",split="validation" )
# automatic decoding with librispeech
_lowerCamelCase : List[Any] = ds.sort("id" ).select(range(__A ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def lowerCamelCase_ ( self : List[str] ):
# fmt: off
_lowerCamelCase : Optional[int] = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
_lowerCamelCase : Any = self._load_datasamples(1 )
_lowerCamelCase : int = ASTFeatureExtractor()
_lowerCamelCase : str = feature_extractor(__A,return_tensors="pt" ).input_values
self.assertEquals(input_values.shape,(1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0],__A,atol=1e-4 ) ) | 11 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
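# The fast Blenderbot tokenizer mirrors the Roberta fast tokenizer, with Blenderbot-specific post-processing.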
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase_ : List[Any] = {'facebook/blenderbot-3B': 128}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
lowerCAmelCase_ = BlenderbotTokenizer
def __init__( self : Dict,__A : Optional[Any]=None,__A : List[str]=None,__A : Optional[Any]=None,__A : List[Any]="replace",__A : List[Any]="<s>",__A : str="</s>",__A : List[str]="</s>",__A : List[Any]="<s>",__A : Union[str, Any]="<unk>",__A : Optional[Any]="<pad>",__A : Dict="<mask>",__A : Any=False,__A : Tuple=True,**__A : Dict,):
super().__init__(
__A,__A,tokenizer_file=__A,errors=__A,bos_token=__A,eos_token=__A,sep_token=__A,cls_token=__A,unk_token=__A,pad_token=__A,mask_token=__A,add_prefix_space=__A,trim_offsets=__A,**__A,)
_lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : Optional[Any] = getattr(__A,pre_tok_state.pop("type" ) )
_lowerCamelCase : List[Any] = add_prefix_space
_lowerCamelCase : Optional[Any] = pre_tok_class(**__A )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : Any = "post_processor"
_lowerCamelCase : Optional[Any] = getattr(self.backend_tokenizer,__A,__A )
if tokenizer_component_instance:
_lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCamelCase : List[Any] = tuple(state["sep"] )
if "cls" in state:
_lowerCamelCase : Optional[int] = tuple(state["cls"] )
_lowerCamelCase : List[str] = False
if state.get("add_prefix_space",__A ) != add_prefix_space:
_lowerCamelCase : List[str] = add_prefix_space
_lowerCamelCase : int = True
if state.get("trim_offsets",__A ) != trim_offsets:
_lowerCamelCase : List[str] = trim_offsets
_lowerCamelCase : Dict = True
if changes_to_apply:
_lowerCamelCase : Tuple = getattr(__A,state.pop("type" ) )
_lowerCamelCase : Union[str, Any] = component_class(**__A )
setattr(self.backend_tokenizer,__A,__A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowerCamelCase_ ( self : Optional[int] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : Optional[Any],__A : List[Any] ):
_lowerCamelCase : Any = AddedToken(__A,lstrip=__A,rstrip=__A ) if isinstance(__A,__A ) else value
_lowerCamelCase : List[str] = value
def lowerCamelCase_ ( self : Optional[int],*__A : Optional[Any],**__A : List[str] ):
_lowerCamelCase : int = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,*__A : Union[str, Any],**__A : List[Any] ):
_lowerCamelCase : Optional[int] = kwargs.get("is_split_into_words",__A )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__A,**__A )
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
_lowerCamelCase : Optional[int] = self._tokenizer.model.save(__A,name=__A )
return tuple(__A )
def lowerCamelCase_ ( self : Optional[int],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : int = [self.sep_token_id]
_lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Dict,__A : List[int],__A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : List[str],__A : "Conversation" ):
_lowerCamelCase : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__A )
_lowerCamelCase : List[Any] = " ".join(__A )
_lowerCamelCase : List[str] = self.encode(__A )
if len(__A ) > self.model_max_length:
_lowerCamelCase : Tuple = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for the given direction (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic subsequence array[low:low + length] into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low + length] in place; length must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ') | 11 |
'''simple docstring'''
def equation(x: float) -> float:
    """The function whose root is sought: f(x) = 10 - x^2."""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """Find a root of `equation` between a and b by repeatedly halving the interval."""
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6)) | 11 | 1 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a recognized issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Return True if credit_card_number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a diagnostic message and return whether the number is valid."""
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f'{error_message} of its length.' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f'{error_message} of its first two digits.' )
        return False
    if not luhn_validation(credit_card_number ):
        print(f'{error_message} it fails the Luhn check.' )
        return False
    print(f'{credit_card_number} is a valid credit card number.' )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323') | 11 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase_ : Any = re.compile(R'\s+')
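# Whitespace is stripped before hashing so that formatting-only differences do not defeat deduplication.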
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_lowerCAmelCase , "" , example["content"] ).encode("utf-8" ) ).hexdigest()}
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [len(_lowerCAmelCase ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(_lowerCAmelCase ), "line_max": max(_lowerCAmelCase )}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def A_ ( _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=5 ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["auto-generated", "autogenerated", "automatically generated"]
_lowerCamelCase : Dict = example["content"].splitlines()
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : List[Any]=0.0_5 ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ["unit tests", "test file", "configuration file"]
_lowerCamelCase : int = example["content"].splitlines()
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[Any] = 0
# first test
for _, line in zip(range(_lowerCAmelCase ) , _lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCamelCase : Union[str, Any] = example["content"].count("\n" )
_lowerCamelCase : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : str = ["def ", "class ", "for ", "while "]
_lowerCamelCase : List[str] = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : int=4 ):
"""simple docstring"""
_lowerCamelCase : List[Any] = example["content"].splitlines()
_lowerCamelCase : Union[str, Any] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = tokenizer(example["content"] , truncation=_lowerCAmelCase )["input_ids"]
_lowerCamelCase : str = len(example["content"] ) / len(_lowerCAmelCase )
return {"ratio": ratio}
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = {}
results.update(get_hash(_lowerCAmelCase ) )
results.update(line_stats(_lowerCAmelCase ) )
results.update(alpha_stats(_lowerCAmelCase ) )
results.update(char_token_ratio(_lowerCAmelCase ) )
results.update(is_autogenerated(_lowerCAmelCase ) )
results.update(is_config_or_test(_lowerCAmelCase ) )
results.update(has_no_keywords(_lowerCAmelCase ) )
results.update(has_few_assignments(_lowerCAmelCase ) )
return results
def A_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
"""simple docstring"""
if not check_uniques(_lowerCAmelCase , _lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A_ ( _lowerCAmelCase : int ):
"""simple docstring"""
with open(_lowerCAmelCase , "rb" ) as f_in:
with gzip.open(str(_lowerCAmelCase ) + ".gz" , "wb" , compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCAmelCase , _lowerCAmelCase )
os.unlink(_lowerCAmelCase )
# Settings
UpperCAmelCase_ : Any = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase_ : str = multiprocessing.cpu_count()
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase_ : Dict = time.time()
UpperCAmelCase_ : Any = load_dataset(args.dataset_name, split='train')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
UpperCAmelCase_ : Any = set(ds.unique('hash'))
UpperCAmelCase_ : Dict = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
UpperCAmelCase_ : Optional[int] = time.time()
UpperCAmelCase_ : int = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase_ : Tuple = time.time()
UpperCAmelCase_, UpperCAmelCase_ : List[str] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
UpperCAmelCase_ : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase_ : List[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
UpperCAmelCase_ : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase_ : Tuple = str(data_dir / f'''file-{file_number+1:012}.json''')
UpperCAmelCase_ : int = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''') | 11 | 1 |
'''simple docstring'''
from __future__ import annotations
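# Slowsort: a deliberately inefficient "multiply and surrender" sorting algorithm, included for teaching purposes.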
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start:end + 1] in place using slowsort."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod() | 11 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
UpperCAmelCase_ : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Model type selected in the list: ' + ', '.join(A )} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowerCAmelCase_ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase_ = field(
default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowerCAmelCase_ = field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowerCAmelCase_ = field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ = field(
default=A , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowerCAmelCase_ = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowerCAmelCase_ = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowerCAmelCase_ = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : Optional[int],__A : SquadDataTrainingArguments,__A : PreTrainedTokenizer,__A : Optional[int] = None,__A : Union[str, Split] = Split.train,__A : Optional[bool] = False,__A : Optional[str] = None,__A : Optional[str] = "pt",):
_lowerCamelCase : Tuple = args
_lowerCamelCase : List[str] = is_language_sensitive
        _lowerCamelCase : Dict = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
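        # SQuAD v2 adds unanswerable questions, so it needs its own processor.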
if isinstance(__A,__A ):
try:
_lowerCamelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
_lowerCamelCase : str = mode
# Load data features from cache or dataset file
_lowerCamelCase : str = "v2" if args.version_2_with_negative else "v1"
_lowerCamelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Tuple = cached_features_file + ".lock"
with FileLock(__A ):
if os.path.exists(__A ) and not args.overwrite_cache:
_lowerCamelCase : int = time.time()
_lowerCamelCase : int = torch.load(__A )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCamelCase : Union[str, Any] = self.old_features["features"]
_lowerCamelCase : List[Any] = self.old_features.get("dataset",__A )
_lowerCamelCase : List[Any] = self.old_features.get("examples",__A )
logger.info(
f'Loading features from cached file {cached_features_file} [took %.3f s]',time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
" future run" )
else:
if mode == Split.dev:
_lowerCamelCase : Dict = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCamelCase : Dict = self.processor.get_train_examples(args.data_dir )
_lowerCamelCase , _lowerCamelCase : Dict = squad_convert_examples_to_features(
examples=self.examples,tokenizer=__A,max_seq_length=args.max_seq_length,doc_stride=args.doc_stride,max_query_length=args.max_query_length,is_training=mode == Split.train,threads=args.threads,return_dataset=__A,)
_lowerCamelCase : List[Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples},__A,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self : Optional[Any] ):
return len(self.features )
def __getitem__( self : Tuple,__A : str ):
# Convert to Tensors and build dataset
_lowerCamelCase : List[str] = self.features[i]
_lowerCamelCase : List[str] = torch.tensor(feature.input_ids,dtype=torch.long )
_lowerCamelCase : Optional[int] = torch.tensor(feature.attention_mask,dtype=torch.long )
_lowerCamelCase : Union[str, Any] = torch.tensor(feature.token_type_ids,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.cls_index,dtype=torch.long )
_lowerCamelCase : str = torch.tensor(feature.p_mask,dtype=torch.float )
_lowerCamelCase : Tuple = torch.tensor(feature.is_impossible,dtype=torch.float )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCamelCase : List[str] = torch.tensor(feature.start_position,dtype=torch.long )
_lowerCamelCase : Any = torch.tensor(feature.end_position,dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs | 11 | 1 |
'''simple docstring'''
import requests
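# Minimal Giphy search client; giphy_api_key must be replaced with a real API key before use.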
giphy_api_key = 'YOUR API KEY'
def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Search Giphy for `query` and return the list of gif URLs."""
    formatted_query = "+".join(query.split())
    url = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship'))) | 11 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
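# The slow sentencepiece-based AlbertTokenizer is optional; it is only needed to convert and save the vocabulary.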
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
UpperCAmelCase_ : str = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
UpperCAmelCase_ : str = '▁'
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = AlbertTokenizer
def __init__( self : Dict,__A : str=None,__A : Union[str, Any]=None,__A : Union[str, Any]=True,__A : int=True,__A : List[Any]=False,__A : Any="[CLS]",__A : List[str]="[SEP]",__A : Tuple="<unk>",__A : Dict="[SEP]",__A : Dict="<pad>",__A : List[str]="[CLS]",__A : Any="[MASK]",**__A : Tuple,):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_lowerCamelCase : Union[str, Any] = (
AddedToken(__A,lstrip=__A,rstrip=__A,normalized=__A )
if isinstance(__A,__A )
else mask_token
)
super().__init__(
__A,tokenizer_file=__A,do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,sep_token=__A,pad_token=__A,cls_token=__A,mask_token=__A,**__A,)
_lowerCamelCase : Dict = do_lower_case
_lowerCamelCase : List[str] = remove_space
_lowerCamelCase : Any = keep_accents
_lowerCamelCase : Tuple = vocab_file
_lowerCamelCase : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : List[Any] = [self.sep_token_id]
_lowerCamelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self : List[str],__A : List[int],__A : Optional[List[int]] = None ):
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self : str,__A : str,__A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : Union[str, Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file,__A )
return (out_vocab_file,) | 11 | 1 |