| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own options followed by the training script and its arguments."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the TPU core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
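For context, `xmp.spawn` requires the launched script to expose an `_mp_fn(index)` entry point. The sketch below shows a minimal target script; the file name and body are illustrative assumptions, not part of the original.

# my_tpu_script.py -- hypothetical script launched as:
#   python xla_spawn.py --num_cores 8 my_tpu_script.py --foo bar
import sys


def _mp_fn(index):
    # Called once per TPU core by xmp.spawn; sys.argv was patched by the launcher,
    # so the script sees its own arguments plus --tpu_num_cores.
    print(f"process {index} started with argv: {sys.argv[1:]}")


if __name__ == "__main__":
    _mp_fn(0)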
---
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        # Propagate the input forward through both hidden layers to the output node.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        # Gradient of the squared error with respect to each weight matrix,
        # obtained via the chain rule through the sigmoid activations.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values (all 3-bit combinations).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
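One subtlety worth noting: `sigmoid_derivative` expects the sigmoid output rather than the raw input, because sigma'(x) = sigma(x) * (1 - sigma(x)). A quick numerical sanity check (a standalone sketch, not part of the original module):

import numpy

x = numpy.linspace(-3, 3, 7)
s = 1 / (1 + numpy.exp(-x))  # sigmoid(x)
analytic = s * (1 - s)  # sigmoid_derivative applied to the activation s
eps = 1e-6
numeric = (1 / (1 + numpy.exp(-(x + eps))) - s) / eps  # forward difference
assert numpy.allclose(analytic, numeric, atol=1e-4)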
---
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        cls_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
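A brief usage sketch for the config (assuming the public `transformers` API; the values shown are just the defaults reconstructed above):

from transformers import ConditionalDetrConfig, ConditionalDetrModel

config = ConditionalDetrConfig(num_queries=300, d_model=256)
model = ConditionalDetrModel(config)  # randomly initialized, not pretrained
# hidden_size and num_attention_heads are aliases for d_model and encoder_attention_heads
print(config.hidden_size, config.num_attention_heads)  # 256 8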
---
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"

_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"

_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
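To see the min(recall, precision) behavior without the `datasets` wrapper, NLTK (which the metric delegates to) can be called directly; a small sketch:

from nltk.translate import gleu_score

hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
reference = ["the", "cat", "is", "on", "the", "mat"]

# sentence_gleu takes a list of references for one hypothesis.
score = gleu_score.sentence_gleu([reference], hypothesis, min_len=1, max_len=4)
print(round(score, 2))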
---
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict


@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
---
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
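For reference, `require_version` takes a pip-style requirement string plus an optional hint that is appended to the error message; a direct-usage sketch (the requirement string here is illustrative, not from the original):

from transformers.utils.versions import require_version

# Raises an error mentioning the hint if the installed version does not satisfy the spec.
require_version("tokenizers>=0.10.1", "To fix: pip install -U tokenizers")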
---
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
---
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """
    Returns the maximum sum of any k consecutive elements of `array`,
    computed with a sliding window in O(n).
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide the window one step right: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
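A quick worked trace of the window update: for `[1, 9, -1, -2, 7, 3]` with `k = 3`, the successive window sums are 9, 6, 4, 8, so the maximum is 9.

assert max_sum_in_array([1, 9, -1, -2, 7, 3], k=3) == 9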
---
class Graph:
    """
    Data structure to store graphs (based on adjacency lists)
    """

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """
        Adds a vertex to the graph
        """
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """
        Adds an edge to the graph
        """
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """
        For Boruvka's algorithm the weights should be distinct;
        converts the weights to be distinct.
        """
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """
        Returns string representation of the graph
        """
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """
        Returns all edges in the graph
        """
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """
        Returns all vertices in the graph
        """
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """
        Builds a graph from the given set of vertices and edges
        """
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """
        Disjoint-set data structure (union by rank with path compression)
        used by Boruvka's algorithm.
        """

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """
        Implementation of Boruvka's algorithm: repeatedly add the cheapest
        edge leaving each component until a single component remains.
        """
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
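A small usage sketch for the class above (vertices and weights chosen for illustration):

g = Graph.build(
    vertices=[0, 1, 2, 3],
    edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)],
)
g.distinct_weight()  # Boruvka's algorithm requires distinct edge weights
mst = Graph.boruvka_mst(g)
print(mst)  # expected MST edges: 0-1, 0-2, 2-3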
---
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Divide and conquer: return the maximum value of nums[left:right + 1].
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
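Usage is over an index range, inclusive on both ends; for example:

assert find_max([2, 8, -1, 5], 0, 3) == 8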
---
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """
    Returns the sum of the digits in the numerator of the max_n-th convergent
    of the continued fraction for e (Project Euler problem 65).
    """
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
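Background for the recurrence: the continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], so the loop's `e_cont` reproduces the partial quotients, and the convergent numerators satisfy n_i = a_i * n_(i-1) + n_(i-2). A short standalone sketch listing the first numerators:

# Convergent numerators of e: 2, 3, 8, 11, 19, 87, 106, ...
pre, cur = 1, 2
numerators = [cur]
for i in range(2, 8):
    a_i = 2 * i // 3 if i % 3 == 0 else 1  # partial quotients 1, 2, 1, 1, 4, 1, ...
    pre, cur = cur, a_i * cur + pre
    numerators.append(cur)
print(numerators)  # [2, 3, 8, 11, 19, 87, 106]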
---
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset.

    Multiple copies of the same prompt are sent sequentially. See compute_code for more details.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, gathered across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))

    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
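The `code_eval` metric reports pass@k using the unbiased estimator from the Codex paper: with n samples per task of which c pass, pass@k = 1 - C(n-c, k) / C(n, k). A standalone sketch of that estimator:

import numpy as np


def estimate_pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k; computed as a stable product instead of raw binomials."""
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))


print(estimate_pass_at_k(n=200, c=10, k=1))  # 0.05 (equals c / n for k = 1)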
---
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :List[str] = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = OrderedDict()
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
SCREAMING_SNAKE_CASE__ : int = key.replace("module.encoder" , "glpn.encoder" )
if key.startswith("module.decoder" ):
SCREAMING_SNAKE_CASE__ : int = key.replace("module.decoder" , "decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
SCREAMING_SNAKE_CASE__ : Optional[int] = key[key.find("patch_embed" ) + len("patch_embed" )]
SCREAMING_SNAKE_CASE__ : Any = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(_lowerCamelCase )-1}""" )
if "norm" in key:
SCREAMING_SNAKE_CASE__ : Any = key.replace("norm" , "layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
SCREAMING_SNAKE_CASE__ : List[str] = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
SCREAMING_SNAKE_CASE__ : Tuple = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(_lowerCamelCase )-1}""" )
if "layer_norm1" in key:
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
SCREAMING_SNAKE_CASE__ : int = key[key.find("block" ) + len("block" )]
SCREAMING_SNAKE_CASE__ : List[str] = key.replace(f"""block{idx}""" , f"""block.{int(_lowerCamelCase )-1}""" )
if "attn.q" in key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
SCREAMING_SNAKE_CASE__ : Any = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = key.replace("attn" , "attention.self" )
if "fc1" in key:
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("fc1" , "dense1" )
if "fc2" in key:
SCREAMING_SNAKE_CASE__ : Any = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("linear_fuse.conv" , "linear_fuse" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
SCREAMING_SNAKE_CASE__ : List[Any] = key[key.find("linear_c" ) + len("linear_c" )]
SCREAMING_SNAKE_CASE__ : Dict = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(_lowerCamelCase )-1}""" )
if "bot_conv" in key:
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("bot_conv" , "0.convolution" )
if "skip_conv1" in key:
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("skip_conv1" , "1.convolution" )
if "skip_conv2" in key:
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("skip_conv2" , "2.convolution" )
if "fusion1" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = key.replace("fusion1" , "1.fusion" )
if "fusion2" in key:
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("fusion2" , "2.fusion" )
if "fusion3" in key:
SCREAMING_SNAKE_CASE__ : int = key.replace("fusion3" , "3.fusion" )
if "fusion" in key and "conv" in key:
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("conv" , "convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = key.replace("module.last_layer_depth" , "head.head" )
SCREAMING_SNAKE_CASE__ : Dict = value
return new_state_dict
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
SCREAMING_SNAKE_CASE__ : int = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kv_weight[
: config.hidden_sizes[i], :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = kv_bias[: config.hidden_sizes[i]]
SCREAMING_SNAKE_CASE__ : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
SCREAMING_SNAKE_CASE__ : Optional[int] = kv_bias[config.hidden_sizes[i] :]
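def _kv_split_demo() -> None:
    """Hedged sketch, not part of the original script: shows on a toy tensor the
    split performed above, assuming a hypothetical hidden size of 4. The fused
    projection stacks the key weights above the value weights along dim 0."""
    hidden = 4
    kv_weight = torch.randn(2 * hidden, hidden)  # fused [key; value] matrix
    key_weight = kv_weight[:hidden, :]  # first half -> ...attention.self.key.weight
    value_weight = kv_weight[hidden:, :]  # second half -> ...attention.self.value.weight
    assert torch.equal(torch.cat([key_weight, value_weight], dim=0), kv_weight)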
def UpperCAmelCase ( ):
'''fetch the standard COCO test image (000000039769.jpg) used to sanity-check the conversion'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : str=None ):
'''convert an original GLPN checkpoint into a GLPNForDepthEstimation model, verify the predicted depth and optionally push everything to the hub'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
SCREAMING_SNAKE_CASE__ : Dict = GLPNImageProcessor()
# prepare image
SCREAMING_SNAKE_CASE__ : Any = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )
# rename keys
SCREAMING_SNAKE_CASE__ : int = rename_keys(_lowerCamelCase )
# key and value matrices need special treatment
read_in_k_v(_lowerCamelCase , _lowerCamelCase )
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE__ : Optional[int] = GLPNForDepthEstimation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# forward pass
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _lowerCamelCase , atol=1E-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
__lowercase :Dict = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
__lowercase :Any = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
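# Example invocation (a sketch; the script name and paths are placeholders, the
# flags come from the parser above):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti --push_to_hub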
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
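# Hedged usage note (a sketch, not part of the original file): with this lazy
# _import_structure, the submodules are only imported on first attribute access,
# so importing the package stays cheap until e.g. UperNetForSemanticSegmentation
# is actually looked up, at which point the torch availability check above runs.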
| 26 | 1 |
from __future__ import annotations
class _a :
"""simple docstring"""
def __init__( self : Dict , a : str , a : str ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = text, pattern
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = len(a ), len(a )
def A_ ( self : int , a : str ) ->int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A_ ( self : Tuple , a : int ) ->int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A_ ( self : Optional[Any] ) ->list[int]:
# searches pattern in text and returns index positions
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in range(self.textLen - self.patLen + 1 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.mismatch_in_text(a )
if mismatch_index == -1:
positions.append(a )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] )
SCREAMING_SNAKE_CASE__ : List[Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__lowercase :Any = "ABAABA"
__lowercase :Optional[Any] = "AB"
__lowercase :Union[str, Any] = BoyerMooreSearch(text, pattern)
__lowercase :Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
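# Hedged extra checks (a sketch, not in the original; assumes the class and
# method keep the names the demo above already uses):
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]
assert BoyerMooreSearch("ABAABA", "CC").bad_character_heuristic() == []  # absent pattern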
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''load the CSV files, tokenize them and return tf.data train/validation/test datasets plus the label-to-id mapping'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''fine-tune and evaluate a TF sequence-classification model on CSV data with TFTrainer'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
    num_labels=len(_lowerCamelCase ) ,
    labelaid=_lowerCamelCase ,
    idalabel={id: label for label, id in labelaid.items()} ,
    finetuning_task="text-classification" ,
    cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
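# Hypothetical invocation (a sketch; script name and paths are placeholders,
# the flags come from the dataclasses above and TFTrainingArguments):
#   python run_tf_text_classification.py --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#       --output_dir ./out --do_train --do_eval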
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[list[int]] ):
'''size of the largest square of 1s, via plain recursion over (row, col); exponential time'''
def update_area_of_max_square(_lowerCamelCase : int , _lowerCamelCase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
SCREAMING_SNAKE_CASE__ : Any = update_area_of_max_square(_lowerCamelCase , col + 1 )
SCREAMING_SNAKE_CASE__ : Dict = update_area_of_max_square(row + 1 , col + 1 )
SCREAMING_SNAKE_CASE__ : Any = update_area_of_max_square(row + 1 , _lowerCamelCase )
if mat[row][col]:
SCREAMING_SNAKE_CASE__ : Any = 1 + min([right, diagonal, down] )
SCREAMING_SNAKE_CASE__ : Any = max(largest_square_area[0] , _lowerCamelCase )
return sub_problem_sol
else:
return 0
SCREAMING_SNAKE_CASE__ : Tuple = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[list[int]] ):
'''same recursion as above, memoized with a 2D dp_array'''
def update_area_of_max_square_using_dp_array(
_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
SCREAMING_SNAKE_CASE__ : Optional[Any] = update_area_of_max_square_using_dp_array(_lowerCamelCase , col + 1 , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = update_area_of_max_square_using_dp_array(row + 1 , _lowerCamelCase , _lowerCamelCase )
if mat[row][col]:
SCREAMING_SNAKE_CASE__ : Optional[int] = 1 + min([right, diagonal, down] )
SCREAMING_SNAKE_CASE__ : Tuple = max(largest_square_area[0] , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sub_problem_sol
return sub_problem_sol
else:
return 0
SCREAMING_SNAKE_CASE__ : str = [0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[-1] * cols for _ in range(_lowerCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _lowerCamelCase )
return largest_square_area[0]
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[list[int]] ):
'''iterative bottom-up DP over a (rows + 1) x (cols + 1) table'''
SCREAMING_SNAKE_CASE__ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
SCREAMING_SNAKE_CASE__ : Dict = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
SCREAMING_SNAKE_CASE__ : Any = dp_array[row][col + 1]
SCREAMING_SNAKE_CASE__ : List[str] = dp_array[row + 1][col + 1]
SCREAMING_SNAKE_CASE__ : Tuple = dp_array[row + 1][col]
if mat[row][col] == 1:
SCREAMING_SNAKE_CASE__ : Dict = 1 + min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = max(dp_array[row][col] , _lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : int = 0
return largest_square_area
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[list[int]] ):
'''bottom-up DP keeping only two rows, i.e. O(cols) extra space'''
SCREAMING_SNAKE_CASE__ : Dict = [0] * (cols + 1)
SCREAMING_SNAKE_CASE__ : List[str] = [0] * (cols + 1)
SCREAMING_SNAKE_CASE__ : List[str] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
SCREAMING_SNAKE_CASE__ : List[Any] = current_row[col + 1]
SCREAMING_SNAKE_CASE__ : Dict = next_row[col + 1]
SCREAMING_SNAKE_CASE__ : int = next_row[col]
if mat[row][col] == 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 + min(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = max(current_row[col] , _lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : Tuple = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
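# Hedged extra check (a sketch, not in the original; assumes the bottom-up
# function keeps the name the print above uses). The largest all-ones square
# in this 3x3 grid is 2x2, so the expected result is 2:
assert largest_square_area_in_matrix_bottom_up(3, 3, [[1, 0, 1], [1, 1, 1], [0, 1, 1]]) == 2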
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
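# Hedged usage sketch (comments only, not part of the original module; the class
# is defined above under a placeholder name). With the defaults declared in
# __init__, preprocessing an HxWx3 uint8 image runs:
#   resize shortest edge to 256 -> center crop to 224x224 -> rescale by 1/255
#   -> normalize with IMAGENET_STANDARD_MEAN/STD -> channels-first layout,
# so `processor(images=img, return_tensors="np")["pixel_values"]` would come
# back with shape (1, 3, 224, 224).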
| 26 | 1 |
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__lowercase :Any = "base_with_context"
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ):
'''port the notes-encoder weights (token embedding, positional embedding, attention and MLP blocks) into the PyTorch module'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
SCREAMING_SNAKE_CASE__ : Optional[int] = weights[f"""layers_{lyr_num}"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
SCREAMING_SNAKE_CASE__ : List[str] = ly_weight["attention"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
SCREAMING_SNAKE_CASE__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : int = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict ):
'''port the continuous (spectrogram-context) encoder weights into the PyTorch module'''
SCREAMING_SNAKE_CASE__ : int = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
SCREAMING_SNAKE_CASE__ : Dict = weights[f"""layers_{lyr_num}"""]
SCREAMING_SNAKE_CASE__ : List[str] = ly_weight["attention"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
SCREAMING_SNAKE_CASE__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
SCREAMING_SNAKE_CASE__ : List[str] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Any ):
'''port the decoder weights (time embedding, FiLM layers, self/cross attention, MLP and spectrogram head) into the PyTorch module'''
SCREAMING_SNAKE_CASE__ : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : int = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = weights[f"""layers_{lyr_num}"""]
SCREAMING_SNAKE_CASE__ : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : List[Any] = ly_weight["self_attention"]
SCREAMING_SNAKE_CASE__ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ly_weight["MultiHeadDotProductAttention_0"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : str = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
SCREAMING_SNAKE_CASE__ : str = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
SCREAMING_SNAKE_CASE__ : Tuple = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def UpperCAmelCase ( _lowerCamelCase : int ):
'''load the original checkpoint, rebuild the notes encoder, context encoder and decoder, wrap them in a SpectrogramDiffusionPipeline and optionally save it'''
SCREAMING_SNAKE_CASE__ : Tuple = checkpoints.load_tax_checkpoint(args.checkpoint_path )
SCREAMING_SNAKE_CASE__ : List[str] = jax.tree_util.tree_map(onp.array , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
SCREAMING_SNAKE_CASE__ : int = os.path.join(args.checkpoint_path , ".." , "config.gin" )
SCREAMING_SNAKE_CASE__ : int = inference.parse_training_gin_file(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = inference.InferenceModel(args.checkpoint_path , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
SCREAMING_SNAKE_CASE__ : int = SpectrogramNotesEncoder(
    max_length=synth_model.sequence_length["inputs"] ,
    vocab_size=synth_model.model.module.config.vocab_size ,
    d_model=synth_model.model.module.config.emb_dim ,
    dropout_rate=synth_model.model.module.config.dropout_rate ,
    num_layers=synth_model.model.module.config.num_encoder_layers ,
    num_heads=synth_model.model.module.config.num_heads ,
    d_kv=synth_model.model.module.config.head_dim ,
    d_ff=synth_model.model.module.config.mlp_dim ,
    feed_forward_proj="gated-gelu" , )
SCREAMING_SNAKE_CASE__ : Dict = SpectrogramContEncoder(
    input_dims=synth_model.audio_codec.n_dims ,
    targets_context_length=synth_model.sequence_length["targets_context"] ,
    d_model=synth_model.model.module.config.emb_dim ,
    dropout_rate=synth_model.model.module.config.dropout_rate ,
    num_layers=synth_model.model.module.config.num_encoder_layers ,
    num_heads=synth_model.model.module.config.num_heads ,
    d_kv=synth_model.model.module.config.head_dim ,
    d_ff=synth_model.model.module.config.mlp_dim ,
    feed_forward_proj="gated-gelu" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TaFilmDecoder(
    input_dims=synth_model.audio_codec.n_dims ,
    targets_length=synth_model.sequence_length["targets_context"] ,
    max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time ,
    d_model=synth_model.model.module.config.emb_dim ,
    num_layers=synth_model.model.module.config.num_decoder_layers ,
    num_heads=synth_model.model.module.config.num_heads ,
    d_kv=synth_model.model.module.config.head_dim ,
    d_ff=synth_model.model.module.config.mlp_dim ,
    dropout_rate=synth_model.model.module.config.dropout_rate , )
SCREAMING_SNAKE_CASE__ : int = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = load_decoder(ta_checkpoint["target"]["decoder"] , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SpectrogramDiffusionPipeline(
notes_encoder=_lowerCamelCase , continuous_encoder=_lowerCamelCase , decoder=_lowerCamelCase , scheduler=_lowerCamelCase , melgan=_lowerCamelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__lowercase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f"{MODEL}/checkpoint_500000",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
__lowercase :str = parser.parse_args()
main(args)
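# Hypothetical invocation (a sketch; script name and output path are placeholders):
#   python convert_music_spectrogram_to_diffusers.py --output_path ./spectrogram_diffusion
# The checkpoint defaults to f"{MODEL}/checkpoint_500000" as declared above.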
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
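# Hedged note (not in the original file): being decorated with @slow, these
# integration tests are skipped by default and are typically enabled explicitly,
# e.g. RUN_SLOW=1 python -m pytest <path-to-this-test-file> (path is a placeholder).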
| 26 | 1 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''sort sequence[start:end] in place with the deliberately inefficient slowsort algorithm'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
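# Hedged extra demo (a sketch, not in the original; assumes the sorter keeps the
# name its recursive calls above already use). slowsort mutates in place:
demo = [3, 1, 2, 5, 4]
slowsort(demo)
assert demo == [1, 2, 3, 4, 5]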
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''register the custom pytest markers used across the test suite'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''forward the shared transformers command-line options to pytest'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''write the optional test reports at the end of the terminal summary'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''treat an empty test collection (pytest exit status 5) as success'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
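# Hedged usage note (a sketch, not part of the original conftest): a doctest can
# opt out of output comparison with the flag registered above, e.g.
#   >>> print(model_outputs)  # doctest: +IGNORE_RESULT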
| 26 | 1 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''return the maximum sum of any k consecutive elements, computed with a sliding window'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''return the largest product a * b * c over Pythagorean triplets with a + b + c == n'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
from __future__ import annotations
import math
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool , _lowerCamelCase : list[int] , _lowerCamelCase : float ):
'''return the optimal score at node_index, maximizing on max levels and minimizing on min levels'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
)
def UpperCAmelCase ( ):
'''run minimax on a sample list of leaf scores and print the optimal value'''
SCREAMING_SNAKE_CASE__ : Optional[int] = [90, 23, 6, 33, 21, 65, 123, 34_423]
SCREAMING_SNAKE_CASE__ : List[Any] = math.log(len(_lowerCamelCase ) , 2 )
print(f"""Optimal value : {minimax(0 , 0 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
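# Hand-worked sketch (not in the original): the eight scores above form a tree
# of height log2(8) = 3; max and min levels alternate and the maximiser can
# secure 65, so main() prints "Optimal value : 65".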
| 26 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any ):
'''build a BertForPreTraining model from the config, load the TF checkpoint weights and save a PyTorch state dict'''
SCREAMING_SNAKE_CASE__ : List[str] = BertConfig.from_json_file(_lowerCamelCase )
print(f"""Building PyTorch model from configuration: {config}""" )
SCREAMING_SNAKE_CASE__ : List[Any] = BertForPreTraining(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
__lowercase :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowercase :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
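# Invocation sketch (paths are placeholders; the flags come from the parser above):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin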
| 26 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''check whether num/den survives naive digit cancelling (e.g. 49/98 -> 4/8)'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''collect the non-trivial digit-cancelling fractions "num/den" for the given digit length'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''return the denominator, in lowest terms, of the product of all two-digit curious fractions'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
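# Hand-worked sketch (not in the original): the two-digit curious fractions are
# 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution() == 100.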
| 26 | 1 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CodeGenTokenizer
snake_case_ = CodeGenTokenizerFast
snake_case_ = True
snake_case_ = {"add_prefix_space": True}
snake_case_ = False
def A_ ( self : List[Any] ) ->List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE__ : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE__ : List[str] = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
def A_ ( self : int , **a : str ) ->Dict:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a )
def A_ ( self : int , **a : List[str] ) ->Tuple:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a )
def A_ ( self : Dict , a : Any ) ->Any:
SCREAMING_SNAKE_CASE__ : Tuple = "lower newer"
SCREAMING_SNAKE_CASE__ : Optional[int] = "lower newer"
return input_text, output_text
def A_ ( self : Optional[int] ) ->int:
SCREAMING_SNAKE_CASE__ : Any = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : int = "lower newer"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.tokenize(a , add_prefix_space=a )
self.assertListEqual(a , a )
SCREAMING_SNAKE_CASE__ : List[str] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
def A_ ( self : Union[str, Any] ) ->Any:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=a )
SCREAMING_SNAKE_CASE__ : Any = "lower newer"
# Testing tokenization
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize(a , add_prefix_space=a )
SCREAMING_SNAKE_CASE__ : Dict = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(a , add_special_tokens=a , add_prefix_space=a )
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(a , add_prefix_space=a )
SCREAMING_SNAKE_CASE__ : Any = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : Any = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a ) , a )
def A_ ( self : Any , *a : Tuple , **a : Optional[int] ) ->Tuple:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def A_ ( self : Dict , a : Optional[int]=15 ) ->Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE__ : Any = self.rust_tokenizer_class.from_pretrained(a , **a )
# Simple input
SCREAMING_SNAKE_CASE__ : int = "This is a simple input"
SCREAMING_SNAKE_CASE__ : List[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding="max_length" )
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding="max_length" )
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding="max_length" , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding="max_length" )
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding="max_length" )
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding="max_length" , )
def A_ ( self : List[str] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : int = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Tuple = ["This is a simple input looooooooong", "This is a simple input"]
SCREAMING_SNAKE_CASE__ : Optional[int] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : List[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a , padding="max_length" , max_length=30 , return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(a , padding=a , truncate=a , return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(*a , padding="max_length" , max_length=60 , return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(a , padding=a , truncate=a , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def A_ ( self : Optional[int] ) ->Dict:
SCREAMING_SNAKE_CASE__ : List[Any] = "$$$"
SCREAMING_SNAKE_CASE__ : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a , add_bos_token=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "This is a simple input"
SCREAMING_SNAKE_CASE__ : List[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(a )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(a )
self.assertEqual(out_s.input_ids[0] , a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def A_ ( self : int ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
SCREAMING_SNAKE_CASE__ : List[str] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
SCREAMING_SNAKE_CASE__ : Optional[int] = "\nif len_a > len_b: result = a\nelse: result = b"
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(a )
SCREAMING_SNAKE_CASE__ : List[str] = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.decode(a , truncate_before_pattern=a )
self.assertEqual(a , a )
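# Hedged note (behaviour of CodeGenTokenizer.decode, not shown in full here):
# truncate_before_pattern cuts the decoded string at the first match of any
# of the regexes above ("^#", "<|endoftext|>", triple quotes, "\n\n\n"), so
# trailing comment markers such as the final "#" in the input are dropped.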
def A_ ( self : Union[str, Any] ) ->Tuple:
pass
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
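# Minimal usage sketch outside unittest (model id taken from the tests above;
# the waveform variable is an assumption):
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# preds = classifier(waveform, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
# -> a list of {"score": float, "label": str} dicts, highest score first.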
| 26 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase :Optional[int] = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[int] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__lowercase :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
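# Usage sketch (an assumption, not part of the original module; upstream this
# class is transformers.CLIPProcessor). With both text and images, the
# tokenizer encoding is returned with the image processor's pixel_values
# attached; with text only, just the tokenizer encoding; with images only, a
# BatchEncoding built from the image features.
# processor = _a(image_processor=CLIPImageProcessor(), tokenizer=clip_tokenizer)
# batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")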
| 26 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Any , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : Tuple , ) ->Any:
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
SCREAMING_SNAKE_CASE__ : int = field
SCREAMING_SNAKE_CASE__ : int = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__ : Tuple = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def A_ ( self : List[str] ) ->Any:
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__ : List[Any] = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _a :
"""simple docstring"""
def __init__( self : Optional[Any] , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : Any , ) ->Any:
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
SCREAMING_SNAKE_CASE__ : Tuple = dataset
SCREAMING_SNAKE_CASE__ : List[str] = path_or_buf
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : Any = num_proc
SCREAMING_SNAKE_CASE__ : Optional[int] = "utf-8"
SCREAMING_SNAKE_CASE__ : Any = to_json_kwargs
def A_ ( self : Union[str, Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = self.to_json_kwargs.pop("path_or_buf" , a )
SCREAMING_SNAKE_CASE__ : List[str] = self.to_json_kwargs.pop("orient" , "records" )
SCREAMING_SNAKE_CASE__ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
SCREAMING_SNAKE_CASE__ : Any = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
SCREAMING_SNAKE_CASE__ : Any = self._write(
file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
return written
def A_ ( self : List[Any] , a : int ) ->str:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = args
SCREAMING_SNAKE_CASE__ : str = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ : Dict = batch.to_pandas().to_json(
path_or_buf=a , orient=a , lines=a , index=a , **a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def A_ ( self : str , a : BinaryIO , a : Optional[Any] , a : List[Any] , a : str , **a : Optional[int] , ) ->int:
SCREAMING_SNAKE_CASE__ : List[str] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
SCREAMING_SNAKE_CASE__ : int = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(a )
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
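# Usage sketch (hedged; upstream these classes are datasets' JsonDatasetReader
# and JsonDatasetWriter). Writing defaults to orient="records" with
# lines=True, i.e. one JSON object per row:
# JsonDatasetWriter(my_dataset, "out.jsonl").write()
# Reading back:
# ds = JsonDatasetReader("out.jsonl", cache_dir="cache").read()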
| 26 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum distance of each explored vertex to a neighboring vertex of the
# partial tree formed in the graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of distances of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
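# Worked example (traced by hand, assuming the interactive input format
# below): edges "0 1 1", "1 2 2" and "0 2 3" build the adjacency list
# {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[1, 2], [0, 3]]}
# and prisms_algorithm returns [(0, 1), (1, 2)], the two cheapest edges of
# the minimum spanning tree.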
if __name__ == "__main__": # pragma: no cover
# < --------- Prim's Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 1 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ : List[Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE__ : Optional[Any] = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ : int = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : Dict = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
SCREAMING_SNAKE_CASE__ : Optional[Any] = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ : str = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
SCREAMING_SNAKE_CASE__ : int = features.copy()
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : Dict = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE__ : Optional[Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] ):
'''simple docstring'''
if issubclass(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : List[Any] = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [jsonl_path]
SCREAMING_SNAKE_CASE__ : Tuple = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]=("train",) ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
for split in splits:
SCREAMING_SNAKE_CASE__ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE__ : Optional[int] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE__ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE__ : List[str] = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE__ : int = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] ):
'''simple docstring'''
if split:
SCREAMING_SNAKE_CASE__ : int = {split: jsonl_path}
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = "train"
SCREAMING_SNAKE_CASE__ : Dict = {"train": jsonl_path, "test": jsonl_path}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE__ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE__ : int = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
return json.load(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Any ):
'''simple docstring'''
return [json.loads(_lowerCamelCase ) for line in buffer]
class _a :
"""simple docstring"""
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def A_ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[int] ) ->Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(a , a , lines=a ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ : str = load_json_function(a )
assert isinstance(a , a )
assert isinstance(exported_content[0] , a )
assert len(a ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def A_ ( self : Optional[int] , a : List[Any] , a : Tuple , a : Optional[Any] , a : Optional[int] , a : List[str] ) ->str:
with io.BytesIO() as buffer:
JsonDatasetWriter(a , a , lines=a , orient=a ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = load_json(a )
assert isinstance(a , a )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(a , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(a ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def A_ ( self : List[Any] , a : Optional[Any] , a : str , a : str ) ->Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(a , a , lines=a , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = load_json_function(a )
assert isinstance(a , a )
assert isinstance(exported_content[0] , a )
assert len(a ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def A_ ( self : Optional[Any] , a : Optional[Any] , a : List[str] , a : Dict , a : Optional[Any] , a : Optional[Any] ) ->Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(a , a , lines=a , orient=a , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE__ : Dict = load_json(a )
assert isinstance(a , a )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(a , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(a ) == 10
def A_ ( self : str , a : Dict ) ->Optional[Any]:
with pytest.raises(a ):
with io.BytesIO() as buffer:
JsonDatasetWriter(a , a , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def A_ ( self : Union[str, Any] , a : Dict , a : Union[str, Any] , a : List[Any] , a : List[Any] , a : Optional[Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Dict = tmp_path_factory.mktemp("data" ) / f"""test.json.{extension}"""
SCREAMING_SNAKE_CASE__ : List[str] = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(a , a , compression=a ).write()
with fsspec.open(a , "rb" , compression="infer" ) as f:
SCREAMING_SNAKE_CASE__ : Dict = f.read()
with fsspec.open(a , "rb" , compression="infer" ) as f:
SCREAMING_SNAKE_CASE__ : Any = f.read()
assert exported_content == original_content
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
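# Shape sketch (an assumption about the dummy inputs): for batch_size=2 and
# seq_length=8, input_ids, attention_mask and global_attention_mask all have
# shape (2, 8); the global mask starts as zeros and, upstream, every second
# position is set to 1 as the comment above describes.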
| 26 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowercase :Any = ""
__lowercase :Optional[Any] = ""
__lowercase :Tuple = ""
__lowercase :List[Any] = 1 # (0 is vertical, 1 is horizontal)
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_dataset(_lowerCamelCase , _lowerCamelCase )
print("Processing..." )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = update_image_and_anno(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for index, image in enumerate(_lowerCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
SCREAMING_SNAKE_CASE__ : Dict = random_chars(32 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , _lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(_lowerCamelCase )} with {file_name}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for anno in new_annos[index]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(_lowerCamelCase )
with open(f"""/{file_root}.txt""" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for label_file in glob.glob(os.path.join(_lowerCamelCase , "*.txt" ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(_lowerCamelCase ) as in_file:
SCREAMING_SNAKE_CASE__ : List[str] = in_file.readlines()
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(_lowerCamelCase , f"""{label_name}.jpg""" )
SCREAMING_SNAKE_CASE__ : List[str] = []
for obj_list in obj_lists:
SCREAMING_SNAKE_CASE__ : Optional[int] = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_lowerCamelCase )
labels.append(_lowerCamelCase )
return img_paths, labels
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : int = 1 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : str = []
for idx in range(len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = img_list[idx]
path_list.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = anno_list[idx]
SCREAMING_SNAKE_CASE__ : List[str] = cva.imread(_lowerCamelCase )
if flip_type == 1:
SCREAMING_SNAKE_CASE__ : str = cva.flip(_lowerCamelCase , _lowerCamelCase )
for bbox in img_annos:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
SCREAMING_SNAKE_CASE__ : int = cva.flip(_lowerCamelCase , _lowerCamelCase )
for bbox in img_annos:
SCREAMING_SNAKE_CASE__ : Optional[int] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_lowerCamelCase )
new_imgs_list.append(_lowerCamelCase )
return new_imgs_list, new_annos_lists, path_list
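# Worked example: a YOLO-style annotation [cls, x_center, y_center, w, h] =
# [0, 0.25, 0.40, 0.10, 0.20] flipped horizontally (flip_type == 1) mirrors
# only the x centre, 1 - 0.25 = 0.75, giving [0, 0.75, 0.40, 0.10, 0.20];
# a vertical flip (flip_type == 0) mirrors y_center instead.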
def UpperCAmelCase ( _lowerCamelCase : int = 32 ):
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
SCREAMING_SNAKE_CASE__ : List[str] = ascii_lowercase + digits
return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
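# Worked example: solution(10) builds fib = [0, 1, 1, 2, 3, 5, 8, 13]; the
# even terms not exceeding 10 are 2 and 8 (plus 0), so it returns 10.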
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but uses more memory.
__lowercase :Union[str, Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
__lowercase :Tuple = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
__lowercase :List[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = len([g for position, g in enumerate(_lowerCamelCase ) if g == main_target[position]] )
return (item, float(_lowerCamelCase ))
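# Worked example: evaluate("Helxo", "Hello") counts the four matching
# positions (H, e, l, o) and returns ("Helxo", 4.0).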
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = random.randint(0 , len(_lowerCamelCase ) - 1 )
SCREAMING_SNAKE_CASE__ : Tuple = parent_a[:random_slice] + parent_a[random_slice:]
SCREAMING_SNAKE_CASE__ : Any = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
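# Hedged note: upstream this is a single-point crossover that splices one
# parent's prefix onto the other's suffix -- e.g. parents "ABCD" and "WXYZ"
# with random_slice = 2 would yield ("ABYZ", "WXCD").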
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : list[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(_lowerCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
SCREAMING_SNAKE_CASE__ : Optional[Any] = random.choice(_lowerCamelCase )
return "".join(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : tuple[str, float] , _lowerCamelCase : list[tuple[str, float]] , _lowerCamelCase : list[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# Generate more children proportionally to the fitness score.
SCREAMING_SNAKE_CASE__ : Optional[int] = int(parent_a[1] * 100 ) + 1
SCREAMING_SNAKE_CASE__ : str = 10 if child_n >= 10 else child_n
for _ in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = population_score[random.randint(0 , _lowerCamelCase )][0]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = crossover(parent_a[0] , _lowerCamelCase )
# Append new string to the population list.
pop.append(mutate(_lowerCamelCase , _lowerCamelCase ) )
pop.append(mutate(_lowerCamelCase , _lowerCamelCase ) )
return pop
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : list[str] , _lowerCamelCase : bool = True ):
'''simple docstring'''
if N_POPULATION < N_SELECTED:
SCREAMING_SNAKE_CASE__ : int = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_lowerCamelCase )
# Verify that the target contains no genes besides the ones inside the genes variable.
SCREAMING_SNAKE_CASE__ : Optional[Any] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
SCREAMING_SNAKE_CASE__ : int = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_lowerCamelCase )
# Generate random starting population.
SCREAMING_SNAKE_CASE__ : List[Any] = []
for _ in range(_lowerCamelCase ):
population.append("".join([random.choice(_lowerCamelCase ) for i in range(len(_lowerCamelCase ) )] ) )
# Just some logs to know what the algorithm is doing.
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_lowerCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
SCREAMING_SNAKE_CASE__ : List[str] = [evaluate(_lowerCamelCase , _lowerCamelCase ) for item in population]
# Check if there is a matching evolution.
SCREAMING_SNAKE_CASE__ : Any = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] , reverse=_lowerCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of the evolution.
SCREAMING_SNAKE_CASE__ : Any = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_lowerCamelCase )
# Normalize population score to be between 0 and 1.
SCREAMING_SNAKE_CASE__ : List[Any] = [
(item, score / len(_lowerCamelCase )) for item, score in population_score
]
# This is selection
for i in range(_lowerCamelCase ):
population.extend(select(population_score[int(_lowerCamelCase )] , _lowerCamelCase , _lowerCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
        # far fewer generations.
if len(_lowerCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
__lowercase :Tuple = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
__lowercase :int = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
__lowercase , __lowercase , __lowercase :Any = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
| 26 |
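For concreteness, here is the ThreadPoolExecutor idea from the comments inside the evolution loop above, made runnable. The target string, the evaluate_one helper, and the toy population are illustrative assumptions, not part of the file itself.

import concurrent.futures

TARGET = "HELLO"

def evaluate_one(item: str) -> tuple[str, float]:
    # Same scoring idea as the file's evaluate(): count position-wise matches.
    score = sum(1 for position, g in enumerate(item) if g == TARGET[position])
    return (item, float(score))

population = ["HELLO", "HELPO", "WORLD"]
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    futures = [executor.submit(evaluate_one, item) for item in population]
    population_score = [future.result() for future in futures]

print(sorted(population_score, key=lambda x: x[1], reverse=True)[0])  # ('HELLO', 5.0)

As the original comment notes, threads only pay off when evaluation is expensive; for a fitness function this cheap, the plain list comprehension is faster.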
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 1 |
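The expected-size computation exercised by get_expected_values above can be read as one standalone function: scale so the shorter edge equals size, cap the longer edge at size * 1333 / 800, round, then floor both edges to a multiple of size_divisor. A minimal sketch using the test's constants; the helper name is an assumption.

def expected_resized_shape(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple[int, int]:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)  # 479 for size=288
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_resized_shape(400, 640))  # (288, 448): shorter edge hits 288, both edges multiples of 32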
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase :Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
__lowercase :Optional[int] = 5
__lowercase :Optional[Any] = 10
@require_sentencepiece
@require_tokenizers
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = SpeechaTextTokenizer
snake_case_ = False
snake_case_ = True
def A_ ( self : Union[str, Any] ) ->Optional[Any]:
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[Any] = sp.SentencePieceProcessor()
spm_model.Load(a )
SCREAMING_SNAKE_CASE__ : Any = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(a ) )]
SCREAMING_SNAKE_CASE__ : List[str] = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE__ : str = Path(self.tmpdirname )
save_json(a , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(a , save_dir / VOCAB_FILES_NAMES["spm_file"] )
SCREAMING_SNAKE_CASE__ : Optional[int] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Optional[int] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict = "<pad>"
SCREAMING_SNAKE_CASE__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def A_ ( self : Dict ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(a ) , 10_01 )
def A_ ( self : List[Any] ) ->Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def A_ ( self : int ) ->List[str]:
SCREAMING_SNAKE_CASE__ : str = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [2_89, 50, 14, 1_74, 3_86] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
SCREAMING_SNAKE_CASE__ : str = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(a , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def A_ ( self : List[str] ) ->Tuple:
# fmt: off
SCREAMING_SNAKE_CASE__ : Tuple = {"input_ids": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = "valhalla/s2t_mustc_multilinguial_medium"
snake_case_ = "C'est trop cool"
snake_case_ = "Esto es genial"
@classmethod
def A_ ( cls : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def A_ ( self : Any ) ->Dict:
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def A_ ( self : Union[str, Any] ) ->str:
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def A_ ( self : int ) ->Union[str, Any]:
self.assertIn(a , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ : List[Any] = [ES_CODE, 4, 16_01, 47, 76_47, 2]
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.decode(a , skip_special_tokens=a )
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = "fr"
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , a )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def A_ ( self : int ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : int = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 26 |
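The setUp above builds a vocabulary from a SentencePiece model, and the tokenize test then checks a pieces-to-ids round trip. The same round trip can be checked directly against any SentencePiece model; the model path below is an assumption.

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("fixtures/test_sentencepiece.model")  # assumed path; any .model file works
pieces = sp.EncodeAsPieces("This is a test")
ids = [sp.PieceToId(piece) for piece in pieces]
assert sp.DecodePieces(pieces) == "This is a test"
print(pieces, ids)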
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # the remaining odd component (d);
    # essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 1 |
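The d * 2 ** s == n - 1 decomposition in the loop above, worked for n = 561 (the smallest Carmichael number, which the self-test asserts is composite): 560 = 35 * 2**4, so d = 35 and s = 4. A single-witness check then looks like this:

a, d, s, n = 2, 35, 4, 561
x = pow(a, d, n)  # 263
# Composite for witness a unless x == 1 or some squaring step hits n - 1.
is_composite_for_a = x != 1 and all(pow(a, d * 2**r, n) != n - 1 for r in range(s))
print(is_composite_for_a)  # True: witness 2 exposes 561 as composite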
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__lowercase :Any = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "linear"
snake_case_ = "cosine"
snake_case_ = "cosine_with_restarts"
snake_case_ = "polynomial"
snake_case_ = "constant"
snake_case_ = "constant_with_warmup"
snake_case_ = "piecewise_constant"
def UpperCAmelCase ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ):
'''simple docstring'''
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ):
'''simple docstring'''
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : List[str] = step_rules.split("," )
for rule_str in rule_list[:-1]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = rule_str.split(":" )
SCREAMING_SNAKE_CASE__ : Dict = int(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = float(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = value
SCREAMING_SNAKE_CASE__ : str = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase : Any , _lowerCamelCase : List[Any] ):
def rule_func(_lowerCamelCase : int ) -> float:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
SCREAMING_SNAKE_CASE__ : List[Any] = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]=-1 ):
'''simple docstring'''
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1 ):
'''simple docstring'''
def lr_lambda(_lowerCamelCase : Union[str, Any] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : str = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 ):
'''simple docstring'''
def lr_lambda(_lowerCamelCase : List[str] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any=1E-7 , _lowerCamelCase : Any=1.0 , _lowerCamelCase : int=-1 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
SCREAMING_SNAKE_CASE__ : Any = lr_init - lr_end
SCREAMING_SNAKE_CASE__ : Any = num_training_steps - num_warmup_steps
SCREAMING_SNAKE_CASE__ : str = 1 - (current_step - num_warmup_steps) / decay_steps
SCREAMING_SNAKE_CASE__ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowercase :Union[str, Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def UpperCAmelCase ( _lowerCamelCase : Union[str, SchedulerType] , _lowerCamelCase : Optimizer , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : int = -1 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = SchedulerType(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
| 26 |
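Every schedule above boils down to a multiplier function handed to LambdaLR. A self-contained sketch of the linear warmup-then-decay case; the factory name is illustrative, not the module's API.

import torch
from torch.optim.lr_scheduler import LambdaLR

def linear_warmup_decay(num_warmup_steps: int, num_training_steps: int):
    def lr_lambda(step: int) -> float:
        if step < num_warmup_steps:
            return step / max(1, num_warmup_steps)
        return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))
    return lr_lambda

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = LambdaLR(optimizer, linear_warmup_decay(10, 100))
for _ in range(100):
    optimizer.step()   # lr ramps from 0 to 0.1 over 10 steps, then decays back to 0
    scheduler.step()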
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
        # Random initial weights are assigned, where the first argument is the
        # number of nodes in the previous layer and the second argument is the
        # number of nodes in the next layer.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 1 |
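For orientation: the truth table in the example above is the 3-bit parity function (the output is the XOR of the three inputs), and a forward pass through the 3-4-3-1 architecture is just three sigmoid-activated matrix products. A minimal sketch with readable names, on a subset of the inputs:

import numpy as np

def sigmoid(x: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.default_rng(0)
X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]], dtype=np.float64)
W1, W2, W3 = rng.random((3, 4)), rng.random((4, 3)), rng.random((3, 1))
hidden1 = sigmoid(X @ W1)
hidden2 = sigmoid(hidden1 @ W2)
output = sigmoid(hidden2 @ W3)
print(output.shape)  # (4, 1): one prediction per input row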
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : int ):
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_lowerCamelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 |
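Note that the function slices a string, so it produces character-level n-grams, not word-level ones. For example:

sentence = "I am a sentence"
print([sentence[i : i + 2] for i in range(len(sentence) - 2 + 1)])
# ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']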
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 | 1 |
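The metric class above is a thin wrapper over NLTK, so the same number can be reproduced without datasets; the toy sentences below are illustrative:

from nltk.translate import gleu_score

hypothesis = "the cat sat on the mat".split()
reference = "the cat is on the mat".split()
score = gleu_score.corpus_gleu(
    list_of_references=[[reference]], hypotheses=[hypothesis], min_len=1, max_len=4
)
print(score)  # a value in [0, 1]; 1.0 only when every n-gram matches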
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__lowercase :List[Any] = "bart"
__lowercase :str = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def UpperCAmelCase ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
SCREAMING_SNAKE_CASE__ : Dict = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
SCREAMING_SNAKE_CASE__ : Tuple = qar_model.eval()
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = (None, None)
if MODEL_TYPE == "bart":
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
SCREAMING_SNAKE_CASE__ : int = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
SCREAMING_SNAKE_CASE__ : int = sas_model.eval()
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def UpperCAmelCase ( ):
'''simple docstring'''
if LOAD_DENSE_INDEX:
SCREAMING_SNAKE_CASE__ : str = faiss.StandardGpuResources()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"]
SCREAMING_SNAKE_CASE__ : str = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
SCREAMING_SNAKE_CASE__ : str = faiss.IndexFlatIP(128 )
SCREAMING_SNAKE_CASE__ : List[str] = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = (None, None)
SCREAMING_SNAKE_CASE__ : List[str] = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = datasets.load_dataset("eli5" , name="LFQA_reddit" )
SCREAMING_SNAKE_CASE__ : List[Any] = elia["train_eli5"]
SCREAMING_SNAKE_CASE__ : str = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) )
SCREAMING_SNAKE_CASE__ : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__lowercase , __lowercase , __lowercase :int = load_indexes()
__lowercase , __lowercase , __lowercase , __lowercase :Tuple = load_models()
__lowercase , __lowercase :Union[str, Any] = load_train_data()
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any]=10 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int="wiki40b" , _lowerCamelCase : int="dense" , _lowerCamelCase : List[str]=10 ):
'''simple docstring'''
if source == "none":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="english_wiki40b_snippets_100w" , n_results=_lowerCamelCase , )
SCREAMING_SNAKE_CASE__ : str = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
SCREAMING_SNAKE_CASE__ : str = "question: {} context: {}".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any]=64 , _lowerCamelCase : str=256 , _lowerCamelCase : List[str]=False , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=0.9_5 , _lowerCamelCase : Dict=0.8 ):
'''simple docstring'''
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1_024 , device="cuda:0" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__lowercase :Tuple = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__lowercase :int = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase :Union[str, Any] = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase :Optional[Any] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__lowercase :Tuple = st.sidebar.checkbox("Demo options")
if demo_options:
__lowercase :Any = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__lowercase :Optional[Any] = action_list.index(action_st)
__lowercase :Any = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__lowercase :int = show_type == "Show full text of passages"
else:
__lowercase :List[Any] = 3
__lowercase :int = True
__lowercase :int = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__lowercase :Dict = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__lowercase :Tuple = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__lowercase :List[str] = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__lowercase :Dict = "wiki40b"
__lowercase :Any = "dense"
__lowercase :List[str] = "beam"
__lowercase :List[str] = 2
__lowercase :Union[str, Any] = 64
__lowercase :Tuple = 256
__lowercase :Optional[Any] = None
__lowercase :Optional[Any] = None
__lowercase :Dict = st.sidebar.checkbox("Generation options")
if generate_options:
__lowercase :Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__lowercase :int = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__lowercase :Any = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__lowercase :str = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase :Dict = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase :List[str] = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
__lowercase :Union[str, Any] = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
__lowercase :List[str] = None
# start main text
__lowercase :List[str] = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__lowercase :int = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowercase :Tuple = st.text_input("Enter your question here:", "")
else:
__lowercase :Optional[int] = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowercase , __lowercase :int = make_support(question, source=wiki_source, method="dense", n_results=10)
__lowercase , __lowercase :Dict = make_support(question, source=wiki_source, method="sparse", n_results=10)
__lowercase :int = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowercase :Optional[Any] = support_list[:10]
__lowercase :Union[str, Any] = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__lowercase , __lowercase :str = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowercase , __lowercase :List[Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__lowercase :Union[str, Any] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__lowercase :List[Any] = res[1].strip()
if sec_titles == "":
__lowercase :List[str] = "[{}]({})".format(res[0], wiki_url)
else:
__lowercase :List[str] = sec_titles.split(" & ")
__lowercase :Dict = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__lowercase :Tuple = find_nearest_training(question)
__lowercase :str = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__lowercase :Tuple = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__lowercase :str = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
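# Standalone sketch (toy tuples, names illustrative) of the dense/sparse
# merge-and-dedupe performed above: results from the two retrievers are
# zipped together and appended in order, skipping duplicates, before the
# merged list is truncated to the top 10.
_dense = [("A", "passage 1"), ("B", "passage 2")]
_sparse = [("B", "passage 2"), ("C", "passage 3")]
_merged = []
for _d, _s in zip(_dense, _sparse):
    if tuple(_d) not in _merged:
        _merged.append(tuple(_d))
    if tuple(_s) not in _merged:
        _merged.append(tuple(_s))
assert _merged == [("A", "passage 1"), ("B", "passage 2"), ("C", "passage 3")]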
| 26 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    """Check that the runtime version of ``pkg`` matches the requirement pinned in the deps table."""
    require_version(deps[pkg], hint)
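# Example (hypothetical requirement string, shown only to illustrate the API
# above): require_version parses a pip-style specifier, compares it against
# the installed distribution, and raises an error carrying the optional hint
# when the check fails.
#
#   require_version("tqdm>=4.27", "try: pip install --upgrade tqdm")
#   dep_version_check("tqdm")  # same check, with the pin taken from the deps table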
| 26 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
__lowercase :List[Any] = False
__lowercase :Union[str, Any] = False
def UpperCAmelCase ( _lowerCamelCase : Namespace ):
'''simple docstring'''
return TrainCommand(_lowerCamelCase )
class _a ( lowercase__ ):
"""simple docstring"""
@staticmethod
def A_ ( a : ArgumentParser ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[str] = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=a , required=a , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=a , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=a , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=a , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=a , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=a , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=a , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=a , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=a , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=a , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=a , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=a , default=3E-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=a , default=1E-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=a )
def __init__( self : Union[str, Any] , a : Namespace ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger("transformers-cli/training" )
SCREAMING_SNAKE_CASE__ : Dict = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=a )
SCREAMING_SNAKE_CASE__ : Tuple = args.output
SCREAMING_SNAKE_CASE__ : Optional[Any] = args.column_label
SCREAMING_SNAKE_CASE__ : int = args.column_text
SCREAMING_SNAKE_CASE__ : int = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE__ : List[Any] = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
SCREAMING_SNAKE_CASE__ : Tuple = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : str = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
SCREAMING_SNAKE_CASE__ : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Tuple = args.validation_split
SCREAMING_SNAKE_CASE__ : str = args.train_batch_size
SCREAMING_SNAKE_CASE__ : Tuple = args.valid_batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = args.learning_rate
SCREAMING_SNAKE_CASE__ : List[str] = args.adam_epsilon
def A_ ( self : Dict ) ->Dict:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def A_ ( self : int ) ->Union[str, Any]:
raise NotImplementedError
def A_ ( self : Tuple ) ->str:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 26 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """
    Return the maximum sum of any k consecutive elements of ``array``.

    >>> max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
    24
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
array = [randint(-1_000, 1_000) for i in range(100)]
k = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :Any = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
SCREAMING_SNAKE_CASE__ : Dict = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" , _lowerCamelCase )
if matches:
SCREAMING_SNAKE_CASE__ : int = float(matches[1] )
SCREAMING_SNAKE_CASE__ : Optional[int] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
SCREAMING_SNAKE_CASE__ : Any = 1_001
SCREAMING_SNAKE_CASE__ : Tuple = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE__ : Dict = "huggingface/label-files"
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE__ : List[str] = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : int = "background"
SCREAMING_SNAKE_CASE__ : List[Any] = idalabel
SCREAMING_SNAKE_CASE__ : Any = {v: k for k, v in idalabel.items()}
return config
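# Toy illustration (not the real ImageNet label file) of the off-by-one shift
# performed above: the 1001-way TF head reserves index 0 for "background",
# so every ImageNet class id moves up by one.
_toy_idalabel = {0: "tench", 1: "goldfish"}
_toy_shifted = {k + 1: v for k, v in _toy_idalabel.items()}
_toy_shifted[0] = "background"
assert _toy_shifted == {0: "background", 1: "tench", 2: "goldfish"}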
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
SCREAMING_SNAKE_CASE__ : Any = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
SCREAMING_SNAKE_CASE__ : Tuple = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(images=prepare_img() , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : List[Any] = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.logits
assert logits.shape == (1, 1_001)
if model_name == "mobilenet_v1_1.0_224":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("Pushing to the hub..." )
SCREAMING_SNAKE_CASE__ : Dict = "google/" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__lowercase :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowercase :Union[str, Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 26 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Return the maximum value in nums[left:right + 1] by divide and conquer.

    >>> find_max([1, 5, 3], 0, 2)
    5
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__lowercase :Optional[int] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Any = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :List[Any] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# note: args.batch_size is actually num_return_sequences here; the DataLoader batch size stays 1
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__lowercase :List[str] = logging.get_logger(__name__)
__lowercase :int = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "bloom"
snake_case_ = ["past_key_values"]
snake_case_ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : List[str] , a : Dict=25_08_80 , a : Optional[int]=64 , a : Optional[int]=2 , a : Any=8 , a : List[Any]=1E-5 , a : int=0.02 , a : List[Any]=True , a : Optional[int]=1 , a : Optional[Any]=2 , a : int=False , a : Tuple=0.0 , a : Optional[int]=0.0 , a : Optional[Any]=1 , a : Any=False , **a : Optional[Any] , ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Any = vocab_size
# Backward compatibility with n_embed kwarg
SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop("n_embed" , a )
SCREAMING_SNAKE_CASE__ : int = hidden_size if n_embed is None else n_embed
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_layer
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_head
SCREAMING_SNAKE_CASE__ : int = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE__ : List[str] = pretraining_tp
SCREAMING_SNAKE_CASE__ : str = apply_residual_connection_post_layernorm
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout
SCREAMING_SNAKE_CASE__ : Tuple = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = bos_token_id
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = slow_but_exact
super().__init__(bos_token_id=a , eos_token_id=a , **a )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = version.parse("1.12" )
def __init__( self : Optional[int] , a : PretrainedConfig , a : str = "default" , a : List[PatchingSpec] = None , a : bool = False , ) ->int:
super().__init__(a , task=a , patching_specs=a , use_past=a )
if not getattr(self._config , "pad_token_id" , a ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE__ : int = 0
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Tuple = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(a , direction="inputs" , inverted_values_shape=a )
SCREAMING_SNAKE_CASE__ : Tuple = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return common_inputs
@property
def A_ ( self : int ) ->int:
return self._config.n_layer
@property
def A_ ( self : Union[str, Any] ) ->int:
return self._config.n_head
@property
def A_ ( self : Optional[int] ) ->float:
return 1E-3
def A_ ( self : Optional[Any] , a : "PreTrainedTokenizer" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional["TensorType"] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = super(a , self ).generate_dummy_inputs(
a , batch_size=a , seq_length=a , is_pair=a , framework=a )
# We need to order the inputs in the way they appear in the forward()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : str = seqlen + 2
SCREAMING_SNAKE_CASE__ : int = self._config.hidden_size // self.num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
SCREAMING_SNAKE_CASE__ : List[str] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
SCREAMING_SNAKE_CASE__ : List[str] = [
(torch.zeros(a ), torch.zeros(a )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE__ : Optional[int] = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE__ : Tuple = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(a , a , dtype=a )] , dim=1 )
return ordered_inputs
@property
def A_ ( self : Optional[Any] ) ->int:
return 13
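# Illustrative sketch (toy sizes, kept as comments since torch may be absent)
# of the fused key/value cache layout assumed above: BLOOM folds the heads
# into the batch dimension, keys keep the sequence axis last and values keep
# it second -- which is why the two dummy shapes built above differ.
#
#   import torch
#   batch, n_head, head_dim, past_len = 2, 8, 16, 5
#   key = torch.zeros(batch * n_head, head_dim, past_len)    # (B*H, D, T)
#   value = torch.zeros(batch * n_head, past_len, head_dim)  # (B*H, T, D)
#   query = torch.zeros(batch * n_head, 1, head_dim)         # one new token
#   scores = torch.bmm(query, key)      # -> (B*H, 1, T)
#   context = torch.bmm(scores, value)  # -> (B*H, 1, D)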
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
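# Minimal sketch (PEP 562 __getattr__, illustrative only -- the real
# indirection lives in _LazyModule above) of why this pattern keeps plain
# imports cheap: a submodule is only imported when one of its names is
# first looked up.
#
#   def __getattr__(name):
#       import importlib
#       if name in _import_structure["configuration_upernet"]:
#           module = importlib.import_module(".configuration_upernet", __name__)
#           return getattr(module, name)
#       raise AttributeError(name)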
| 26 | 1 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
# create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
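def _toy_from_generator_example():
    # Minimal sketch (toy data, illustrative only) of the from_generator
    # pattern used above: yield (features, label) pairs and declare their
    # dtypes and shapes so TensorFlow can build the input pipeline lazily.
    def gen():
        yield {"input_ids": [101, 102]}, 0

    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
    )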
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
"""simple docstring"""
snake_case_ = None
def A_ ( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ : int = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , a )
def A_ ( self : str ) ->List[str]:
SCREAMING_SNAKE_CASE__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(a , "feat_extract.json" )
feat_extract_first.to_json_file(a )
SCREAMING_SNAKE_CASE__ : int = self.feature_extraction_class.from_json_file(a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def A_ ( self : Optional[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] = feat_extract_first.save_pretrained(a )[0]
check_json_file_has_correct_format(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(a )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : str = self.feature_extraction_class()
self.assertIsNotNone(a )
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
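        # Reorder to the requested channel layout (channels-first by default) before batching.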
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 26 | 1 |
from collections.abc import Iterable
from typing import Any
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , a : int | None = None ) ->str:
SCREAMING_SNAKE_CASE__ : Dict = value
SCREAMING_SNAKE_CASE__ : Node | None = None # Added in order to delete a node easier
SCREAMING_SNAKE_CASE__ : Node | None = None
SCREAMING_SNAKE_CASE__ : Node | None = None
def __repr__( self : int ) ->str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : Node | None = None ) ->Any:
SCREAMING_SNAKE_CASE__ : Dict = root
def __str__( self : str ) ->str:
return str(self.root )
def A_ ( self : str , a : Node , a : Node | None ) ->None:
if new_children is not None: # reset its kids
SCREAMING_SNAKE_CASE__ : List[str] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(a ): # If it is the right children
SCREAMING_SNAKE_CASE__ : Dict = new_children
else:
SCREAMING_SNAKE_CASE__ : Any = new_children
else:
SCREAMING_SNAKE_CASE__ : List[str] = new_children
def A_ ( self : str , a : Node ) ->bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def A_ ( self : Union[str, Any] ) ->bool:
return self.root is None
def A_ ( self : Optional[int] , a : Dict ) ->None:
SCREAMING_SNAKE_CASE__ : List[Any] = Node(a ) # create a new Node
if self.empty(): # if Tree is empty
SCREAMING_SNAKE_CASE__ : Dict = new_node # set its root
else: # Tree is not empty
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
SCREAMING_SNAKE_CASE__ : List[Any] = new_node # We insert the new node in a leaf
break
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent_node.left
else:
if parent_node.right is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = new_node
break
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent_node.right
SCREAMING_SNAKE_CASE__ : List[str] = parent_node
def A_ ( self : Union[str, Any] , *a : int ) ->None:
for value in values:
self.__insert(a )
def A_ ( self : Any , a : Any ) ->Node | None:
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
SCREAMING_SNAKE_CASE__ : Tuple = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = node.left if value < node.value else node.right
return node
def A_ ( self : Optional[Any] , a : Node | None = None ) ->Node | None:
if node is None:
if self.root is None:
return None
SCREAMING_SNAKE_CASE__ : str = self.root
if not self.empty():
while node.right is not None:
SCREAMING_SNAKE_CASE__ : int = node.right
return node
def A_ ( self : List[Any] , a : Node | None = None ) ->Node | None:
if node is None:
SCREAMING_SNAKE_CASE__ : Tuple = self.root
if self.root is None:
return None
if not self.empty():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.root
while node.left is not None:
SCREAMING_SNAKE_CASE__ : Tuple = node.left
return node
def A_ ( self : Optional[Any] , a : int ) ->None:
SCREAMING_SNAKE_CASE__ : Dict = self.search(a ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(a , a )
elif node.left is None: # Has only right children
self.__reassign_nodes(a , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(a , node.left )
else:
SCREAMING_SNAKE_CASE__ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def A_ ( self : Any , a : Node | None ) ->Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def A_ ( self : str , a : List[Any]=None ) ->Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def A_ ( self : List[Any] , a : list , a : Node | None ) ->None:
if node:
self.inorder(a , node.left )
arr.append(node.value )
self.inorder(a , node.right )
def A_ ( self : Optional[Any] , a : int , a : Node ) ->int:
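        # kth smallest element: an inorder traversal of a BST visits values in ascending order.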
SCREAMING_SNAKE_CASE__ : list[int] = []
self.inorder(a , a ) # append all values to list using inorder traversal
return arr[k - 1]
def UpperCAmelCase ( _lowerCamelCase : Node | None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
if curr_node is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
SCREAMING_SNAKE_CASE__ : List[str] = BinarySearchTree()
for i in testlist:
t.insert(_lowerCamelCase )
# Prints all the elements of the list in order traversal
print(_lowerCamelCase )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn't exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: " , t.get_max().value ) # type: ignore
print("Min Value: " , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_lowerCamelCase )
print(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
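        # Replicate the params across all devices and shard the inputs so each device processes its own slice.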
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
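        # The exported ONNX graph takes a global_attention_mask in addition to the standard inputs.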
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
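    # pytest uses exit status 5 when no tests were collected; report success in that case.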
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int = 10 , _lowerCamelCase : int = 1_000 , _lowerCamelCase : bool = True ):
'''simple docstring'''
assert (
isinstance(_lowerCamelCase , _lowerCamelCase )
and isinstance(_lowerCamelCase , _lowerCamelCase )
and isinstance(_lowerCamelCase , _lowerCamelCase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return int((number_a + number_a) / 2 )
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
assert (
isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(_lowerCamelCase : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = lower
SCREAMING_SNAKE_CASE__ : Tuple = higher
SCREAMING_SNAKE_CASE__ : Tuple = []
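    # Classic binary search: guess the midpoint, then tighten whichever bound the answer rules out.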
while True:
SCREAMING_SNAKE_CASE__ : List[Any] = get_avg(_lowerCamelCase , _lowerCamelCase )
last_numbers.append(_lowerCamelCase )
if answer(_lowerCamelCase ) == "low":
SCREAMING_SNAKE_CASE__ : Dict = number
elif answer(_lowerCamelCase ) == "high":
SCREAMING_SNAKE_CASE__ : int = number
else:
break
print(f"""guess the number : {last_numbers[-1]}""" )
print(f"""details : {last_numbers!s}""" )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(input("Enter lower value : " ).strip() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(input("Enter high value : " ).strip() )
SCREAMING_SNAKE_CASE__ : Dict = int(input("Enter value to guess : " ).strip() )
guess_the_number(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :List[str] = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "encoder-decoder"
snake_case_ = True
def __init__( self : Optional[int] , **a : Dict ) ->Union[str, Any]:
super().__init__(**a )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("encoder" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_config.pop("model_type" )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop("decoder" )
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
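        # Rebuild the nested encoder/decoder configs through the Auto API so any model-type pair works.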
SCREAMING_SNAKE_CASE__ : List[Any] = AutoConfig.for_model(a , **a )
SCREAMING_SNAKE_CASE__ : Any = AutoConfig.for_model(a , **a )
SCREAMING_SNAKE_CASE__ : Dict = True
@classmethod
def A_ ( cls : int , a : PretrainedConfig , a : PretrainedConfig , **a : List[str] ) ->PretrainedConfig:
logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : List[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **a )
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : Dict = self.encoder.to_dict()
SCREAMING_SNAKE_CASE__ : List[Any] = self.decoder.to_dict()
SCREAMING_SNAKE_CASE__ : Dict = self.__class__.model_type
return output
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
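    # Slow-sort both halves, move the larger of the two boundary elements to the end, then sort the rest.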
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Any=1_024 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = [], []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(zip(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = sorted_examples[0]
def is_too_big(_lowerCamelCase : int ):
return tok(_lowerCamelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens
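    # Greedily concatenate consecutive (src, tgt) pairs until either side would exceed max_tokens.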
for src, tgt in tqdm(sorted_examples[1:] ):
SCREAMING_SNAKE_CASE__ : int = new_src + " " + src
SCREAMING_SNAKE_CASE__ : str = new_tgt + " " + tgt
if is_too_big(_lowerCamelCase ) or is_too_big(_lowerCamelCase ): # cant fit, finalize example
finished_src.append(_lowerCamelCase )
finished_tgt.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = src, tgt
else: # can fit, keep adding
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_lowerCamelCase )
finished_tgt.append(_lowerCamelCase )
return finished_src, finished_tgt
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Path , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(_lowerCamelCase )
save_path.mkdir(exist_ok=_lowerCamelCase )
for split in ["train"]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
SCREAMING_SNAKE_CASE__ : str = [x.rstrip() for x in Path(_lowerCamelCase ).open().readlines()]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [x.rstrip() for x in Path(_lowerCamelCase ).open().readlines()]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = pack_examples(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
print(f"""packed {split} split from {len(_lowerCamelCase )} examples -> {len(_lowerCamelCase )}.""" )
Path(save_path / f"""{split}.source""" ).open("w" ).write("\n".join(_lowerCamelCase ) )
Path(save_path / f"""{split}.target""" ).open("w" ).write("\n".join(_lowerCamelCase ) )
for split in ["val", "test"]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
shutil.copyfile(_lowerCamelCase , save_path / f"""{split}.source""" )
shutil.copyfile(_lowerCamelCase , save_path / f"""{split}.target""" )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=_lowerCamelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=_lowerCamelCase , default=128 )
parser.add_argument("--data_dir" , type=_lowerCamelCase )
parser.add_argument("--save_path" , type=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 26 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
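    # e.g. 49/98 is digit-cancelling: dropping the shared 9 leaves 4/8, which equals 49/98.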
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
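    # Build Fibonacci numbers up to n, then sum the even-valued terms (Project Euler #2).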
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
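        # The last clip of the ESC-50 train split is a dog sound (see the slow test below).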
        SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
        SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : list ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = 0
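    # Greedy optimal merge (Huffman-style): always merge the two cheapest files first.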
while len(_lowerCamelCase ) > 1:
SCREAMING_SNAKE_CASE__ : str = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = files.index(min(_lowerCamelCase ) )
temp += files[min_index]
files.pop(_lowerCamelCase )
files.append(_lowerCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
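            # CustomConfig is now the lookup key for the custom classes in each Auto* factory.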
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = [0 for i in range(r + 1 )]
# nc0 = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
SCREAMING_SNAKE_CASE__ : Optional[int] = min(_lowerCamelCase , _lowerCamelCase )
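        # Sweep j from right to left so c[j - 1] still holds the previous row's value (Pascal's rule).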
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
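            # Attach the image tensors to the text encoding so a single object carries both modalities.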
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
| 26 | 1 |
from __future__ import annotations
import numpy as np
def UpperCAmelCase ( _lowerCamelCase : np.ndarray ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = np.shape(_lowerCamelCase )
if rows != columns:
SCREAMING_SNAKE_CASE__ : List[str] = (
"'table' has to be of square shaped array but got a "
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = np.zeros((rows, columns) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.zeros((rows, columns) )
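    # Doolittle LU decomposition: L gets a unit diagonal, and L and U are filled in row by row.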
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Dict = sum(lower[i][k] * upper[k][j] for k in range(_lowerCamelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
SCREAMING_SNAKE_CASE__ : Any = (table[i][j] - total) / upper[j][j]
SCREAMING_SNAKE_CASE__ : Tuple = 1
for j in range(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = sum(lower[i][k] * upper[k][j] for k in range(_lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : Any = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
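    # Prim's algorithm: grow the tree by repeatedly extracting the closest fringe vertex and relaxing its neighbours.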
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
| 26 | 1 |
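The dummy-input step above zeroes a tensor and then marks every second token as global; the slice assignment below stands in for the assignment whose left-hand side was lost in this rendering (upstream it is inputs["global_attention_mask"][:, ::2] = 1):

import torch

input_ids = torch.randint(0, 30_522, (2, 8))  # (batch, sequence)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # every second position attends globally
print(global_attention_mask[0])  # tensor([1, 0, 1, 0, 1, 0, 1, 0])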
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__lowercase :Tuple = logging.get_logger(__name__)
class _a ( enum.Enum ):
"""simple docstring"""
snake_case_ = 0
snake_case_ = 1
@add_end_docstrings(lowercase__ )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "generated"
def __init__( self : List[str] , *a : Any , **a : int ) ->Tuple:
super().__init__(*a , **a )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def A_ ( self : Dict , a : Any=None , a : List[Any]=None , a : Union[str, Any]=None , a : str=None , a : Optional[int]=None , a : List[Any]=None , **a : Union[str, Any] , ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = {}
if truncation is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = truncation
SCREAMING_SNAKE_CASE__ : List[Any] = generate_kwargs
SCREAMING_SNAKE_CASE__ : Tuple = {}
if return_tensors is not None and return_type is None:
SCREAMING_SNAKE_CASE__ : Tuple = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
SCREAMING_SNAKE_CASE__ : str = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.encode(a , add_special_tokens=a )
if len(a ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
SCREAMING_SNAKE_CASE__ : Tuple = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def A_ ( self : int , a : int , a : int , a : int ) ->Any:
return True
def A_ ( self : int , *a : Dict , a : List[str] ) ->Any:
SCREAMING_SNAKE_CASE__ : List[Any] = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , a ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
SCREAMING_SNAKE_CASE__ : Tuple = ([prefix + arg for arg in args[0]],)
SCREAMING_SNAKE_CASE__ : Tuple = True
elif isinstance(args[0] , a ):
SCREAMING_SNAKE_CASE__ : Any = (prefix + args[0],)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer(*a , padding=a , truncation=a , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[int] , *a : Optional[Any] , **a : Optional[Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(*a , **a )
if (
isinstance(args[0] , a )
and all(isinstance(a , a ) for el in args[0] )
and all(len(a ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def A_ ( self : Any , a : Optional[Any] , a : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , **a : int ) ->Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._parse_and_tokenize(a , truncation=a , **a )
return inputs
def A_ ( self : Optional[Any] , a : Dict , **a : List[Any] ) ->Optional[int]:
if self.framework == "pt":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_inputs["input_ids"].shape
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = tf.shape(model_inputs["input_ids"] ).numpy()
SCREAMING_SNAKE_CASE__ : List[Any] = generate_kwargs.get("min_length" , self.model.config.min_length )
SCREAMING_SNAKE_CASE__ : Any = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(a , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(**a , **a )
SCREAMING_SNAKE_CASE__ : str = output_ids.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : str = output_ids.reshape(a , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.reshape(a , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def A_ ( self : int , a : str , a : Union[str, Any]=ReturnType.TEXT , a : Tuple=False ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
SCREAMING_SNAKE_CASE__ : List[str] = {
f"""{self.return_name}_text""": self.tokenizer.decode(
a , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
}
records.append(a )
return records
@add_end_docstrings(lowercase__ )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "summary"
def __call__( self : Union[str, Any] , *a : Dict , **a : Union[str, Any] ) ->int:
return super().__call__(*a , **a )
def A_ ( self : Optional[Any] , a : int , a : int , a : int ) ->bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(lowercase__ )
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "translation"
def A_ ( self : Tuple , a : int , a : int , a : int ) ->List[Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def A_ ( self : Optional[Any] , *a : List[Any] , a : str=TruncationStrategy.DO_NOT_TRUNCATE , a : List[str]=None , a : Optional[Any]=None ) ->Optional[int]:
if getattr(self.tokenizer , "_build_translation_inputs" , a ):
return self.tokenizer._build_translation_inputs(
*a , return_tensors=self.framework , truncation=a , src_lang=a , tgt_lang=a )
else:
return super()._parse_and_tokenize(*a , truncation=a )
def A_ ( self : Union[str, Any] , a : int=None , a : Tuple=None , **a : int ) ->Any:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = super()._sanitize_parameters(**a )
if src_lang is not None:
SCREAMING_SNAKE_CASE__ : int = src_lang
if tgt_lang is not None:
SCREAMING_SNAKE_CASE__ : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
SCREAMING_SNAKE_CASE__ : int = kwargs.get("task" , self.task )
SCREAMING_SNAKE_CASE__ : Tuple = task.split("_" )
if task and len(a ) == 4:
# translation, XX, to YY
SCREAMING_SNAKE_CASE__ : str = items[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : str , *a : int , **a : int ) ->int:
return super().__call__(*a , **a )
| 26 |
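In _forward above, generate() may return several sequences per input, so the flat output batch is regrouped by input before postprocessing. The same reshape on a toy tensor:

import torch

in_b, num_return_sequences, seq_len = 2, 3, 5
output_ids = torch.arange(in_b * num_return_sequences * seq_len).reshape(-1, seq_len)
grouped = output_ids.reshape(in_b, num_return_sequences, *output_ids.shape[1:])
print(grouped.shape)  # torch.Size([2, 3, 5]): one row of candidates per input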
def UpperCAmelCase ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
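The sample above generates every Fibonacci number and filters the even ones. Since every third Fibonacci number is even and the even terms satisfy E(k) = 4*E(k-1) + E(k-2), the odd terms never need to be materialized; a sketch:

def even_fib_sum(n: int = 4_000_000) -> int:
    # Even Fibonacci terms: 2, 8, 34, 144, ... with E(k) = 4*E(k-1) + E(k-2).
    a, b = 2, 8
    total = 0
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total

assert even_fib_sum() == 4_613_732  # matches the filtering version above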
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args_into_dataclasses()[0]
SCREAMING_SNAKE_CASE__ : List[Any] = TensorFlowBenchmark(args=_lowerCamelCase )
try:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
SCREAMING_SNAKE_CASE__ : Optional[Any] = " ".join(str(_lowerCamelCase ).split(" " )[:-1] )
SCREAMING_SNAKE_CASE__ : Dict = ""
SCREAMING_SNAKE_CASE__ : Tuple = eval(str(_lowerCamelCase ).split(" " )[-1] )
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
SCREAMING_SNAKE_CASE__ : Any = full_error_msg + begin_error_msg + str(_lowerCamelCase )
raise ValueError(_lowerCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 26 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 1 |
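The sizing logic inside get_expected_values above, pulled out into a pure function: scale the shorter side to the target, cap the longer side at 1333/800 of it, then round both down to a multiple of size_divisor (the function name is illustrative):

def expected_resize(h, w, shortest_edge=288, size_divisor=32):
    scale = shortest_edge / min(h, w)
    newh, neww = (shortest_edge, scale * w) if h < w else (scale * h, shortest_edge)
    max_size = int((1333 / 800) * shortest_edge)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_resize(400, 640))  # (288, 448)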
import comet # From: unbabel-comet
import torch
import datasets
__lowercase :Dict = datasets.logging.get_logger(__name__)
__lowercase :Union[str, Any] = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
__lowercase :Optional[Any] = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
__lowercase :Dict = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : Any ) ->List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def A_ ( self : List[Any] , a : List[Any] ) ->Union[str, Any]:
if self.config_name == "default":
SCREAMING_SNAKE_CASE__ : Dict = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
else:
SCREAMING_SNAKE_CASE__ : Any = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def A_ ( self : List[Any] , a : int , a : List[Any] , a : str , a : Any=None , a : int=False ) ->str:
if gpus is None:
SCREAMING_SNAKE_CASE__ : str = 1 if torch.cuda.is_available() else 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"src": sources, "mt": predictions, "ref": references}
SCREAMING_SNAKE_CASE__ : List[str] = [dict(zip(a , a ) ) for t in zip(*data.values() )]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = self.scorer.predict(a , gpus=a , progress_bar=a )
return {"mean_score": mean_score, "scores": scores}
| 26 |
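The compute method above turns a dict of parallel columns into a list of per-example dicts before handing them to the scorer; the idiom in isolation:

data = {"src": ["a", "b"], "mt": ["x", "y"], "ref": ["u", "v"]}
rows = [dict(zip(data, t)) for t in zip(*data.values())]  # iterate keys, zip columns
print(rows)  # [{'src': 'a', 'mt': 'x', 'ref': 'u'}, {'src': 'b', 'mt': 'y', 'ref': 'v'}]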
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 1 |
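A stand-alone sketch of the two core steps above, assuming the usual Miller-Rabin formulation: factor n - 1 into d * 2**s with d odd, then run the witness loop with modular exponentiation.

def decompose(n):
    # Solve n - 1 == d * 2**s with d odd, as the while-loop above does.
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

def is_witness(a, n):
    # True if base `a` proves n composite.
    d, s = decompose(n)
    x = pow(a, d, n)
    if x in (1, n - 1):
        return False
    for _ in range(s - 1):
        x = pow(x, 2, n)
        if x == n - 1:
            return False
    return True

print(is_witness(2, 561))  # True: 561 = 3 * 11 * 17 is a Carmichael number
print(is_witness(2, 563))  # False: 563 is prime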
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def UpperCAmelCase ( _lowerCamelCase : int = 5_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , _lowerCamelCase )]
for i, pentagonal_i in enumerate(_lowerCamelCase ):
for j in range(_lowerCamelCase , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Any = pentagonal_nums[j]
SCREAMING_SNAKE_CASE__ : int = pentagonal_i + pentagonal_j
SCREAMING_SNAKE_CASE__ : Optional[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(_lowerCamelCase ) and is_pentagonal(_lowerCamelCase ):
return b
return -1
if __name__ == "__main__":
print(f"{solution() = }")
| 26 |
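The is_pentagonal check above inverts P(n) = n * (3n - 1) / 2 with the quadratic formula; a precomputed set gives the same membership test without the algebra, valid only below the largest precomputed value:

pentagonals = {(i * (3 * i - 1)) // 2 for i in range(1, 10_000)}

def is_pentagonal_set(x: int) -> bool:
    return x in pentagonals  # only meaningful for x <= max(pentagonals)

assert is_pentagonal_set(5_482_660)  # P(1912), the difference the search above returns
assert not is_pentagonal_set(5_482_661)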
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 1 |
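A numeric sanity check of the activation pair the network above relies on: sigmoid_derivative is applied to values that have already been passed through sigmoid, i.e. it computes s * (1 - s) rather than differentiating from scratch.

import numpy

x = numpy.array([-2.0, 0.0, 2.0])
s = 1 / (1 + numpy.exp(-x))  # sigmoid
ds = s * (1 - s)             # derivative expressed in terms of s
print(s.round(4))   # [0.1192 0.5    0.8808]
print(ds.round(4))  # [0.105  0.25   0.105 ]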
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CTRLTokenizer
snake_case_ = False
snake_case_ = False
def A_ ( self : Optional[Any] ) ->int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : List[Any] = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(zip(a , range(len(a ) ) ) )
SCREAMING_SNAKE_CASE__ : List[str] = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
def A_ ( self : str , **a : List[str] ) ->List[Any]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **a )
def A_ ( self : int , a : Any ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = "adapt react readapt apt"
SCREAMING_SNAKE_CASE__ : List[Any] = "adapt react readapt apt"
return input_text, output_text
def A_ ( self : Union[str, Any] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[int] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : List[Any] = "adapt react readapt apt"
SCREAMING_SNAKE_CASE__ : Tuple = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(a )
self.assertListEqual(a , a )
SCREAMING_SNAKE_CASE__ : int = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Optional[int] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
| 26 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
| 26 | 1 |
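The metric above delegates to nltk's corpus_gleu; for a single hypothesis/reference pair, nltk also exposes sentence_gleu with the same min_len/max_len knobs:

from nltk.translate.gleu_score import sentence_gleu

reference = ["the", "cat", "sat", "on", "the", "mat"]
hypothesis = ["the", "cat", "sat", "on", "mat"]
# min(precision, recall) over 1- to 4-grams: 11 matching n-grams / 18 in the reference
print(round(sentence_gleu([reference], hypothesis, min_len=1, max_len=4), 2))  # 0.61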
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__lowercase :Optional[int] = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__lowercase :int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
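A stripped-down sketch of the _LazyModule idea used above: attribute access triggers the real import the first time it happens (PEP 562-style). LazyModule and lazy_math are illustrative names, not the actual implementation.

import importlib
import sys
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):  # only called when `attr` is not found normally
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)

sys.modules["lazy_math"] = LazyModule("lazy_math", {"math": ["sqrt", "pi"]})
import lazy_math

print(lazy_math.sqrt(lazy_math.pi))  # math is imported only on first access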
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
| 26 | 1 |
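require_version ultimately reduces to a PEP 440-aware comparison between the installed and wanted versions; a minimal sketch with the packaging library (illustrative, not the utility's exact code):

from packaging import version

def check_requirement(installed: str, op: str, wanted: str) -> bool:
    ops = {
        ">=": lambda a, b: a >= b,
        "==": lambda a, b: a == b,
        "<": lambda a, b: a < b,
    }
    return ops[op](version.parse(installed), version.parse(wanted))

assert check_requirement("4.21.0", ">=", "4.0.0")
assert not check_requirement("3.7.1", ">=", "4.0.0")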
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase :Tuple = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[int] = ["CLIPFeatureExtractor"]
__lowercase :Union[str, Any] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Dict = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[int] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__lowercase :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) < k or k < 0:
raise ValueError("Invalid Input" )
SCREAMING_SNAKE_CASE__ : int = sum(array[:k] )
for i in range(len(_lowerCamelCase ) - k ):
SCREAMING_SNAKE_CASE__ : str = current_sum - array[i] + array[i + k]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(_lowerCamelCase , _lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 1 |
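A trace of the window update above with k = 3: each step drops array[i] and adds array[i + k], so every new window sum costs O(1) instead of O(k).

array, k = [1, 4, 2, 10, 23, 3, 1, 0, 20], 3
current = sum(array[:k])  # first window: 1 + 4 + 2 = 7
best = current
for i in range(len(array) - k):
    current = current - array[i] + array[i + k]
    best = max(best, current)
print(best)  # 36, from the window [10, 23, 3]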
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :int = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : List[Any]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE__ : Dict = ""
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE__ : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = dct.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = val
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : int=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ViTConfig()
# patch_size
if model_name[-1] == "8":
SCREAMING_SNAKE_CASE__ : Optional[int] = 8
# set labels if required
if not base_model:
SCREAMING_SNAKE_CASE__ : Any = 1_000
SCREAMING_SNAKE_CASE__ : int = "huggingface/label-files"
SCREAMING_SNAKE_CASE__ : int = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE__ : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE__ : int = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 384
SCREAMING_SNAKE_CASE__ : int = 1_536
SCREAMING_SNAKE_CASE__ : str = 12
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 6
# load original model from torch hub
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.hub.load("facebookresearch/dino:main" , _lowerCamelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE__ : List[Any] = original_model.state_dict()
if base_model:
remove_classification_head_(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# load HuggingFace model
if base_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTModel(_lowerCamelCase , add_pooling_layer=_lowerCamelCase ).eval()
else:
SCREAMING_SNAKE_CASE__ : int = ViTForImageClassification(_lowerCamelCase ).eval()
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor
SCREAMING_SNAKE_CASE__ : Optional[int] = ViTImageProcessor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : str = encoding["pixel_values"]
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowerCamelCase )
if base_model:
SCREAMING_SNAKE_CASE__ : Any = original_model(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
SCREAMING_SNAKE_CASE__ : Dict = original_model(_lowerCamelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1E-3 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__lowercase :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
__lowercase :List[str] = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
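# Hypothetical invocation (the script filename and output path are
# illustrative; the DINO weights themselves are fetched from torch hub):
#   python convert_dino_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16_hf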
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list[int | float] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if len(_lowerCamelCase ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(_lowerCamelCase )
or left < -len(_lowerCamelCase )
or right >= len(_lowerCamelCase )
or right < -len(_lowerCamelCase )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
SCREAMING_SNAKE_CASE__ : Optional[int] = (left + right) >> 1 # the middle
SCREAMING_SNAKE_CASE__ : List[Any] = find_max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # find max in range[left, mid]
SCREAMING_SNAKE_CASE__ : Optional[int] = find_max(_lowerCamelCase , mid + 1 , _lowerCamelCase ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
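# A minimal usage sketch (the list and index arguments are illustrative):
#
#   find_max([3, 1, 4, 1, 5, 9, 2, 6], 0, 7)   # -> 9
#   find_max([-2, -7, -1], -3, -1)             # -> -1 (negative indices work too)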
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 1 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : complex , _lowerCamelCase : str = "x" , _lowerCamelCase : float = 10**-10 , _lowerCamelCase : int = 1 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = symbols(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = lambdify(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = lambdify(_lowerCamelCase , diff(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ : str = starting_point
while True:
if diff_function(_lowerCamelCase ) != 0:
SCREAMING_SNAKE_CASE__ : Any = prev_guess - multiplicity * func(_lowerCamelCase ) / diff_function(
_lowerCamelCase )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
SCREAMING_SNAKE_CASE__ : List[Any] = next_guess
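# For a repeated root, passing its multiplicity restores fast convergence; a
# hedged illustration (the function and its root here are illustrative):
#   newton_raphson("(x - 3)**2", 4, multiplicity=2)   # converges quickly to 3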
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 26 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
# without the strip, the model generates commented-out code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not be misled by args.batch_size: here it is actually num_return_sequences, so the dataloader batch size is 1
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
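# Hypothetical invocation (flag spellings mirror the HumanEvalArguments
# fields used above; the exact names live in the arguments module):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot \
#       --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json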
| 26 | 1 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->int:
debug_launcher(test_script.main )
def A_ ( self : Union[str, Any] ) ->Optional[Any]:
debug_launcher(test_ops.main )
| 26 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
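# A hedged sketch of what the lazy module buys: the heavy torch-backed
# classes are only imported on first attribute access (the checkpoint name
# below is illustrative):
#   from transformers import UperNetForSemanticSegmentation
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")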
| 26 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Tuple ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : str = BlipImageProcessor()
SCREAMING_SNAKE_CASE__ : int = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
SCREAMING_SNAKE_CASE__ : Dict = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
SCREAMING_SNAKE_CASE__ : str = InstructBlipProcessor(a , a , a )
processor.save_pretrained(self.tmpdirname )
def A_ ( self : int , **a : Optional[Any] ) ->str:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).tokenizer
def A_ ( self : Tuple , **a : str ) ->Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).image_processor
def A_ ( self : Tuple , **a : int ) ->Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **a ).qformer_tokenizer
def A_ ( self : Tuple ) ->Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def A_ ( self : Optional[int] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Optional[int] = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self : List[str] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_processor(do_normalize=a , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : Dict = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a )
self.assertIsInstance(processor.qformer_tokenizer , a )
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(a , return_tensors="np" )
SCREAMING_SNAKE_CASE__ : Tuple = processor(images=a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A_ ( self : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : int = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : List[Any] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Dict = "lower newer"
SCREAMING_SNAKE_CASE__ : Any = processor(text=a )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer(a , return_token_type_ids=a )
SCREAMING_SNAKE_CASE__ : str = qformer_tokenizer(a , return_token_type_ids=a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def A_ ( self : List[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : List[Any] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "lower newer"
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = processor(text=a , images=a )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(a ):
processor()
def A_ ( self : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ : int = processor.batch_decode(a )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.batch_decode(a )
self.assertListEqual(a , a )
def A_ ( self : Any ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] = InstructBlipProcessor(
tokenizer=a , image_processor=a , qformer_tokenizer=a )
SCREAMING_SNAKE_CASE__ : Dict = "lower newer"
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : Any = processor(text=a , images=a )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 26 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda example : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda example : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
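# A hedged usage sketch of get_tfds (the file names and label column index
# are illustrative; each CSV needs the label column plus one or two text
# columns):
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv", eval_file="dev.csv", test_file=None,
#       tokenizer=tokenizer, label_column_id=0, max_seq_length=128,
#   )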
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(p : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
# without the strip, the model generates commented-out code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not be misled by args.batch_size: here it is actually num_return_sequences, so the dataloader batch size is 1
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
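# A minimal sketch exercising the processor defined above (the random image
# stands in for real pixel data):
#   import numpy as np
#   image_processor = _a()
#   dummy = np.random.randint(0, 255, size=(3, 300, 400), dtype=np.uint8)
#   batch = image_processor(images=dummy, return_tensors="np")
#   batch["pixel_values"].shape   # -> (1, 3, 224, 224) with the defaults above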
| 26 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :int = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__lowercase :Union[str, Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = {}
with open(_lowerCamelCase , "r" ) as file:
for line_number, line in enumerate(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Dict = line.strip()
if line:
SCREAMING_SNAKE_CASE__ : Optional[int] = line.split()
SCREAMING_SNAKE_CASE__ : Dict = line_number
SCREAMING_SNAKE_CASE__ : Dict = words[0]
SCREAMING_SNAKE_CASE__ : List[str] = value
return result
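# The helper maps each line of the text file to its first whitespace-separated
# token, keyed by line number; a hedged illustration of the mapping produced:
#   "down\nup\n"  ->  {0: "down", 1: "up"}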
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE__ : Dict = getattr(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : List[str] = PARAM_MAPPING[full_name.split("." )[-1]]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "param"
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ : str = hf_pointer
for attribute in hf_param_name.split("." ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE__ : List[Any] = value[0]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : int = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : Optional[int] = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = value
else:
SCREAMING_SNAKE_CASE__ : Any = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PARAM_MAPPING[full_name.split("." )[-1]]
SCREAMING_SNAKE_CASE__ : Dict = "param"
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE__ : Any = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE__ : List[str] = ".".join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = key
SCREAMING_SNAKE_CASE__ : int = value if "lm_head" in full_key else value[0]
__lowercase :Optional[Any] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[Any]=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = False
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
SCREAMING_SNAKE_CASE__ : List[Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : Any = name.split(_lowerCamelCase )[0].split("." )[-2]
SCREAMING_SNAKE_CASE__ : Dict = mapped_key.replace("*" , _lowerCamelCase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : List[str] = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : str = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ : List[Any] = "weight"
else:
SCREAMING_SNAKE_CASE__ : Dict = None
if hf_dict is not None:
rename_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return is_used
return is_used
def UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : List[Any] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE__ : List[str] = True
else:
SCREAMING_SNAKE_CASE__ : Any = load_wavaveca_layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE__ : Optional[int] = name.split("." )
SCREAMING_SNAKE_CASE__ : str = int(items[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
SCREAMING_SNAKE_CASE__ : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
SCREAMING_SNAKE_CASE__ : str = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
SCREAMING_SNAKE_CASE__ : Dict = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
SCREAMING_SNAKE_CASE__ : List[Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : Any=None , _lowerCamelCase : int=None , _lowerCamelCase : Dict=True , _lowerCamelCase : Dict=False ):
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaConfig.from_pretrained(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : List[str] = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE__ : Dict = read_txt_into_dict(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = idalabel
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaForSequenceClassification(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
feature_extractor.save_pretrained(_lowerCamelCase )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ : Tuple = Dictionary.load(_lowerCamelCase )
# important: change the bos & pad token ids, since the CTC blank symbol is
# <pad> and not <s> as in fairseq
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_dict.pad_index
SCREAMING_SNAKE_CASE__ : List[Any] = target_dict.bos_index
SCREAMING_SNAKE_CASE__ : Dict = target_dict.eos_index
SCREAMING_SNAKE_CASE__ : List[str] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : List[Any] = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
SCREAMING_SNAKE_CASE__ : int = True if config.feat_extract_norm == "layer" else False
SCREAMING_SNAKE_CASE__ : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = WavaVecaForCTC(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaForPreTraining(_lowerCamelCase )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
SCREAMING_SNAKE_CASE__ : List[str] = argparse.Namespace(task="audio_pretraining" )
SCREAMING_SNAKE_CASE__ : List[str] = fairseq.tasks.setup_task(_lowerCamelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__lowercase :str = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__lowercase :List[str] = parser.parse_args()
__lowercase :Dict = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
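# Hypothetical invocation for a fine-tuned CTC checkpoint (all paths are
# illustrative):
#   python convert_wav2vec2.py --checkpoint_path wav2vec_small_960h.pt \
#       --dict_path dict.ltr.txt --pytorch_dump_folder_path ./wav2vec2-base-960h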
| 26 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
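# Minimal sketch (separate from the tests above) of the replicate/shard pattern
# they use: parameters are replicated across devices while inputs are split
# along the leading batch axis, so each device processes one shard when the
# pipeline runs pmapped. The function name and array shapes are illustrative.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

def sketch_data_parallel_setup():
    num_devices = jax.device_count()
    params = {"w": jnp.ones((4, 4))}      # stand-in for pipeline params
    batch = jnp.zeros((num_devices, 4))   # one example per device
    replicated_params = replicate(params) # adds a leading device axis to params
    sharded_batch = shard(batch)          # splits the batch across devices
    return replicated_params, sharded_batch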
| 26 | 1 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
        for r in range(s ):
            SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(prime , d * 2**r , n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
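# Illustrative calls (added, not from the original file) following this file's
# own convention of invoking the function above as ``miller_rabin``:
#
#   miller_rabin(97)    -> True   (97 is prime)
#   miller_rabin(100)   -> False  (divisible by 2)
#   miller_rabin(2**89 - 1, allow_probable=True)
#       # exceeds the deterministic bound, so the probabilistic path is needed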
| 26 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
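# Example (added for illustration) of how the custom IGNORE_RESULT flag is used
# inside a doctest: when the directive is present, the registered output checker
# accepts any output, so nondeterministic return values do not fail the test.
#
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
#   0.123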
| 26 | 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__lowercase :List[str] = logging.getLogger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : List[str] , a : Union[str, Any] , a : Tuple , a : Tuple=None , a : Any=None ) ->List[str]:
SCREAMING_SNAKE_CASE__ : List[str] = self.layer[current_layer](a , a , head_mask[current_layer] )
SCREAMING_SNAKE_CASE__ : Any = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : List[str] ) ->str:
super().__init__(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = BertEncoderWithPabee(a )
self.init_weights()
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : str = 0
SCREAMING_SNAKE_CASE__ : List[str] = 0
def A_ ( self : Any , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[str] = threshold
def A_ ( self : Optional[Any] , a : List[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[Any] = patience
def A_ ( self : int ) ->int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : List[str] = 0
def A_ ( self : Optional[int] ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = self.inference_layers_num / self.inference_instances_num
SCREAMING_SNAKE_CASE__ : List[str] = (
f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(a )
@add_start_docstrings_to_model_forward(a )
def A_ ( self : Any , a : Optional[int]=None , a : Dict=None , a : Union[str, Any]=None , a : Optional[int]=None , a : Optional[int]=None , a : Dict=None , a : int=None , a : Union[str, Any]=None , a : Dict=None , a : List[Any]=None , a : Optional[Any]=False , ) ->Any:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : str = torch.ones(a , device=a )
if token_type_ids is None:
SCREAMING_SNAKE_CASE__ : Dict = torch.zeros(a , dtype=torch.long , device=a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE__ : torch.Tensor = self.get_extended_attention_mask(a , a , a )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = encoder_hidden_states.size()
SCREAMING_SNAKE_CASE__ : Any = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : int = torch.ones(a , device=a )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.invert_attention_mask(a )
else:
SCREAMING_SNAKE_CASE__ : List[str] = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE__ : Tuple = self.get_head_mask(a , self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.embeddings(
input_ids=a , position_ids=a , token_type_ids=a , inputs_embeds=a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = embedding_output
if self.training:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i in range(self.config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.encoder.adaptive_forward(
a , current_layer=a , attention_mask=a , head_mask=a )
SCREAMING_SNAKE_CASE__ : int = self.pooler(a )
SCREAMING_SNAKE_CASE__ : List[str] = output_layers[i](output_dropout(a ) )
res.append(a )
elif self.patience == 0: # Use all layers for inference
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.encoder(
a , attention_mask=a , head_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.pooler(encoder_outputs[0] )
SCREAMING_SNAKE_CASE__ : Tuple = [output_layers[self.config.num_hidden_layers - 1](a )]
else:
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : int = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
SCREAMING_SNAKE_CASE__ : List[Any] = self.encoder.adaptive_forward(
a , current_layer=a , attention_mask=a , head_mask=a )
SCREAMING_SNAKE_CASE__ : Dict = self.pooler(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_layers[i](a )
if regression:
SCREAMING_SNAKE_CASE__ : int = logits.detach()
if patient_result is not None:
SCREAMING_SNAKE_CASE__ : int = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
SCREAMING_SNAKE_CASE__ : Any = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a ) ):
patient_counter += 1
else:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : int = logits
if patient_counter == self.patience:
break
SCREAMING_SNAKE_CASE__ : Any = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Any , a : List[str] ) ->Any:
super().__init__(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = config.num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = BertModelWithPabee(a )
SCREAMING_SNAKE_CASE__ : Any = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE__ : Tuple = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a )
def A_ ( self : str , a : List[Any]=None , a : Optional[int]=None , a : Tuple=None , a : Dict=None , a : str=None , a : str=None , a : Optional[int]=None , ) ->Dict:
SCREAMING_SNAKE_CASE__ : List[str] = self.bert(
input_ids=a , attention_mask=a , token_type_ids=a , position_ids=a , head_mask=a , inputs_embeds=a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
SCREAMING_SNAKE_CASE__ : List[Any] = (logits[-1],)
if labels is not None:
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : List[str] = 0
for ix, logits_item in enumerate(a ):
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MSELoss()
SCREAMING_SNAKE_CASE__ : Tuple = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ : List[Any] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
SCREAMING_SNAKE_CASE__ : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
SCREAMING_SNAKE_CASE__ : Dict = (total_loss / total_weights,) + outputs
return outputs
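# Self-contained sketch (not part of the original module) of the patience logic
# implemented above for the classification case: inference stops as soon as
# ``patience`` consecutive intermediate classifiers agree on the predicted label.
import torch

def sketch_patience_early_exit(per_layer_logits: list, patience: int = 2):
    patient_counter = 0
    patient_result = None
    for logits in per_layer_logits:
        labels = logits.argmax(dim=1)    # predicted class per example
        if patient_result is not None and torch.all(labels.eq(patient_result)):
            patient_counter += 1         # same prediction as the previous layer
        else:
            patient_counter = 0          # prediction changed, reset the streak
        patient_result = labels
        if patient_counter == patience:  # enough consecutive agreements: exit
            break
    return patient_result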
| 26 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__lowercase :Dict = logging.get_logger(__name__)
__lowercase :List[Any] = {"vocab_file": "spiece.model"}
__lowercase :Union[str, Any] = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
__lowercase :List[Any] = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
__lowercase :List[str] = "▁"
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Tuple , a : int , a : Optional[Any]="</s>" , a : Optional[Any]="<unk>" , a : Tuple="<pad>" , a : int=1_00 , a : Tuple=None , a : Optional[Dict[str, Any]] = None , a : List[Any]=True , **a : Dict , ) ->None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE__ : Tuple = [f"""<extra_id_{i}>""" for i in range(a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
SCREAMING_SNAKE_CASE__ : Dict = len(set(filter(lambda a : bool("extra_id" in str(a ) ) , a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
f"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = legacy
SCREAMING_SNAKE_CASE__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a , unk_token=a , pad_token=a , extra_ids=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , legacy=a , **a , )
SCREAMING_SNAKE_CASE__ : Dict = vocab_file
SCREAMING_SNAKE_CASE__ : Optional[int] = extra_ids
SCREAMING_SNAKE_CASE__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
@staticmethod
def A_ ( a : Any , a : List[Any] , a : Dict ) ->str:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
SCREAMING_SNAKE_CASE__ : Optional[int] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , a , )
return max_model_length
@property
def A_ ( self : List[str] ) ->int:
return self.sp_model.get_piece_size() + self._extra_ids
def A_ ( self : str ) ->int:
SCREAMING_SNAKE_CASE__ : Tuple = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : int , a : List[int] , a : Optional[List[int]] = None , a : bool = False ) ->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a )) + [1]
return ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def A_ ( self : Optional[int] ) ->List[Any]:
return list(
set(filter(lambda a : bool(re.search(r"<extra_id_\d+>" , a ) ) is not None , self.additional_special_tokens ) ) )
def A_ ( self : Union[str, Any] ) ->Dict:
return [self._convert_token_to_id(a ) for token in self.get_sentinel_tokens()]
def A_ ( self : str , a : List[int] ) ->List[int]:
if len(a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def A_ ( self : str , a : List[int] , a : Optional[List[int]] = None ) ->List[int]:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A_ ( self : str , a : List[int] , a : Optional[List[int]] = None ) ->List[int]:
SCREAMING_SNAKE_CASE__ : Tuple = self._add_eos_if_not_present(a )
if token_ids_a is None:
return token_ids_a
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = self._add_eos_if_not_present(a )
return token_ids_a + token_ids_a
def __getstate__( self : List[Any] ) ->str:
SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : str = None
return state
def __setstate__( self : Any , a : List[str] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self : Dict , a : "TextInput" , **a : Dict ) ->List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
SCREAMING_SNAKE_CASE__ : Tuple = SPIECE_UNDERLINE + text.replace(a , " " )
return super().tokenize(a , **a )
def A_ ( self : List[str] , a : List[Any] , **a : Union[str, Any] ) ->List[str]:
if not self.legacy:
SCREAMING_SNAKE_CASE__ : int = text.startswith(a )
if is_first:
SCREAMING_SNAKE_CASE__ : int = text[1:]
SCREAMING_SNAKE_CASE__ : Dict = self.sp_model.encode(a , out_type=a )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(a ):
SCREAMING_SNAKE_CASE__ : Dict = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def A_ ( self : int , a : List[Any] ) ->Optional[Any]:
if token.startswith("<extra_id_" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = re.match(r"<extra_id_(\d+)>" , a )
SCREAMING_SNAKE_CASE__ : Dict = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a )
def A_ ( self : Optional[int] , a : Union[str, Any] ) ->List[str]:
if index < self.sp_model.get_piece_size():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sp_model.IdToPiece(a )
else:
SCREAMING_SNAKE_CASE__ : int = f"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def A_ ( self : Any , a : Union[str, Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : Dict = ""
SCREAMING_SNAKE_CASE__ : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a ) + token
SCREAMING_SNAKE_CASE__ : int = True
SCREAMING_SNAKE_CASE__ : List[Any] = []
else:
current_sub_tokens.append(a )
SCREAMING_SNAKE_CASE__ : List[str] = False
out_string += self.sp_model.decode(a )
return out_string.strip()
def A_ ( self : Any , a : str , a : Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : str = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , "wb" ) as fi:
SCREAMING_SNAKE_CASE__ : Any = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
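# Hypothetical usage sketch (the checkpoint name is illustrative): sentinel
# tokens map to ids counted down from the top of the vocabulary, which is what
# the _convert_token_to_id / _convert_id_to_token pair above implements.
#
#   tokenizer = T5Tokenizer.from_pretrained("t5-small")
#   tokenizer.convert_tokens_to_ids("<extra_id_0>")  # -> vocab_size - 1
#   tokenizer.convert_tokens_to_ids("<extra_id_1>")  # -> vocab_size - 2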
| 26 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
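# Quick usage sketch (added; it follows this file's own convention of invoking
# the function above as ``slowsort``): the sort happens in place.
#
#   data = [6, 2, 9, 4]
#   slowsort(data)
#   data  # -> [2, 4, 6, 9]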
| 26 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ) ->Any:
super().__init__()
SCREAMING_SNAKE_CASE__ : Any = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE__ : Any = nn.Linear(4 , 5 )
def A_ ( self : List[Any] , a : Dict ) ->Any:
return self.lineara(self.batchnorm(self.lineara(a ) ) )
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Optional[Any] , a : Union[str, Any] , *a : Union[str, Any] , **a : List[Any] ) ->Any:
return (args[0] + 1,) + args[1:], kwargs
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : str , a : Tuple , a : Tuple ) ->str:
return output + 1
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Any ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = ModelForTest()
SCREAMING_SNAKE_CASE__ : Dict = ModelHook()
add_hook_to_module(a , a )
self.assertEqual(test_model._hf_hook , a )
self.assertTrue(hasattr(a , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(a )
self.assertFalse(hasattr(a , "_hf_hook" ) )
self.assertFalse(hasattr(a , "_old_forward" ) )
def A_ ( self : str ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = ModelForTest()
SCREAMING_SNAKE_CASE__ : Optional[Any] = ModelHook()
add_hook_to_module(a , a )
add_hook_to_module(a , a , append=a )
self.assertEqual(isinstance(test_model._hf_hook , a ) , a )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(a , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(a )
self.assertFalse(hasattr(a , "_hf_hook" ) )
self.assertFalse(hasattr(a , "_old_forward" ) )
def A_ ( self : List[str] ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Any = ModelForTest()
SCREAMING_SNAKE_CASE__ : Any = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : Any = test_model(x + 1 )
SCREAMING_SNAKE_CASE__ : int = test_model(x + 2 )
SCREAMING_SNAKE_CASE__ : List[str] = PreForwardHook()
add_hook_to_module(a , a )
SCREAMING_SNAKE_CASE__ : Optional[int] = test_model(a )
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
SCREAMING_SNAKE_CASE__ : Dict = PreForwardHook()
add_hook_to_module(a , a )
SCREAMING_SNAKE_CASE__ : Tuple = test_model(a )
self.assertTrue(torch.allclose(a , a , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
SCREAMING_SNAKE_CASE__ : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(a , a )
SCREAMING_SNAKE_CASE__ : str = test_model(a )
assert torch.allclose(a , a , atol=1E-5 )
def A_ ( self : Optional[Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : int = ModelForTest()
SCREAMING_SNAKE_CASE__ : Tuple = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : Tuple = test_model(a )
SCREAMING_SNAKE_CASE__ : str = PostForwardHook()
add_hook_to_module(a , a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = test_model(a )
self.assertTrue(torch.allclose(a , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
SCREAMING_SNAKE_CASE__ : Tuple = PostForwardHook()
add_hook_to_module(a , a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = test_model(a )
self.assertTrue(torch.allclose(a , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
SCREAMING_SNAKE_CASE__ : List[str] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(a , a )
SCREAMING_SNAKE_CASE__ : Tuple = test_model(a )
assert torch.allclose(a , output + 2 , atol=1E-5 )
def A_ ( self : int ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : str = ModelForTest()
SCREAMING_SNAKE_CASE__ : Tuple = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : int = test_model(a )
SCREAMING_SNAKE_CASE__ : Dict = PostForwardHook()
add_hook_to_module(a , a )
SCREAMING_SNAKE_CASE__ : int = test_model(a )
self.assertTrue(torch.allclose(a , output + 1 ) )
self.assertTrue(outputa.requires_grad )
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = test_model(a )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def A_ ( self : int ) ->Dict:
SCREAMING_SNAKE_CASE__ : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
SCREAMING_SNAKE_CASE__ : Any = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(a , AlignDevicesHook(io_same_device=a ) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.randn(2 , 3 ).to(0 )
SCREAMING_SNAKE_CASE__ : Tuple = model(a )
self.assertEqual(output.device , torch.device(0 ) )
def A_ ( self : Tuple ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
SCREAMING_SNAKE_CASE__ : int = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
SCREAMING_SNAKE_CASE__ : str = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , a )
SCREAMING_SNAKE_CASE__ : int = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : str = model(a )
self.assertEqual(output.device , a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
SCREAMING_SNAKE_CASE__ : Tuple = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**a ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
SCREAMING_SNAKE_CASE__ : int = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : Tuple = model(a )
self.assertEqual(output.device , a )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
SCREAMING_SNAKE_CASE__ : Any = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(a , execution_device=a , offload=a )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.device(a )
self.assertEqual(model.batchnorm.running_mean.device , a )
SCREAMING_SNAKE_CASE__ : Any = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : Dict = model(a )
self.assertEqual(output.device , a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(a , execution_device=a , offload=a , offload_buffers=a )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
SCREAMING_SNAKE_CASE__ : Tuple = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : Any = model(a )
self.assertEqual(output.device , a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def A_ ( self : Tuple ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
a , execution_device=a , offload=a , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
SCREAMING_SNAKE_CASE__ : str = torch.device(a )
self.assertEqual(model.batchnorm.running_mean.device , a )
SCREAMING_SNAKE_CASE__ : Any = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : str = model(a )
self.assertEqual(output.device , a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
a , execution_device=a , offload=a , weights_map=model.state_dict() , offload_buffers=a , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.randn(2 , 3 )
SCREAMING_SNAKE_CASE__ : Dict = model(a )
self.assertEqual(output.device , a )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
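# Minimal sketch (separate from the tests above) of attaching and removing a
# hook with accelerate: the pre-forward hook below shifts the first positional
# input before the wrapped module's original forward runs. Names are illustrative.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class ShiftInputHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # shift the first positional input before the original forward is called
        return (args[0] + 1,) + args[1:], kwargs

def sketch_hook_roundtrip():
    layer = nn.Linear(3, 3)
    add_hook_to_module(layer, ShiftInputHook())  # wraps layer.forward
    out = layer(torch.zeros(1, 3))               # the forward sees ones, not zeros
    remove_hook_from_module(layer)               # restores the original forward
    return out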
| 26 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
        SCREAMING_SNAKE_CASE__ : Any = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
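# Worked example (added for clarity): 49/98 is digit-cancelling because removing
# the shared digit 9 leaves 4/8, and 4/8 == 49/98. The four such non-trivial
# two-digit fractions are 16/64, 19/95, 26/65 and 49/98; their product is 1/100,
# so solution() returns 100.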
| 26 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = None
snake_case_ = BloomTokenizerFast
snake_case_ = BloomTokenizerFast
snake_case_ = True
snake_case_ = False
snake_case_ = "tokenizer_file"
snake_case_ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def A_ ( self : Tuple ) ->List[str]:
super().setUp()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : str , **a : Dict ) ->int:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **a )
def A_ ( self : int ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
SCREAMING_SNAKE_CASE__ : Optional[int] = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_encode_plus(a )["input_ids"]
self.assertListEqual(a , a )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.batch_decode(a )
self.assertListEqual(a , a )
def A_ ( self : Dict , a : List[str]=6 ) ->str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.rust_tokenizer_class.from_pretrained(a , **a )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
SCREAMING_SNAKE_CASE__ : Dict = "This is a simple input"
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE__ : List[str] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE__ : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(a , max_length=a )
tokenizer_r.encode_plus(a , max_length=a )
tokenizer_r.batch_encode_plus(a , max_length=a )
tokenizer_r.encode(a , max_length=a )
tokenizer_r.batch_encode_plus(a , max_length=a )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
SCREAMING_SNAKE_CASE__ : str = None # Hotfixing padding = None
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding="max_length" )
# Simple input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding="max_length" )
# Simple input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding="max_length" , )
# Pair input
self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding="max_length" )
# Pair input
self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding="max_length" )
# Pair input
self.assertRaises(
a , tokenizer_r.batch_encode_plus , a , max_length=a , padding="max_length" , )
def A_ ( self : Any ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] = load_dataset("xnli" , "all_languages" , split="test" , streaming=a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = next(iter(a ) )["premise"] # pick up one data
SCREAMING_SNAKE_CASE__ : Any = list(sample_data.values() )
SCREAMING_SNAKE_CASE__ : List[str] = list(map(tokenizer.encode , a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [tokenizer.decode(a , clean_up_tokenization_spaces=a ) for x in output_tokens]
self.assertListEqual(a , a )
def A_ ( self : Union[str, Any] ) ->str:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not impose
        # any sequence length constraints. The parent class's test would fail since it relies on the
        # maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
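# Illustrative round trip (a sketch; the checkpoint name mirrors the one used in
# setUp above): the fast tokenizer should decode its own encodings back to the
# original string, which is what the batch test above asserts.
#
#   tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
#   ids = tok("The quick brown fox")["input_ids"]
#   tok.decode(ids)  # -> "The quick brown fox"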
| 26 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
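# Hypothetical standalone usage of the pipeline exercised above (the array and
# labels are illustrative): any 1-D float waveform can be scored against
# free-form candidate labels.
#
#   import numpy as np
#   classifier = pipeline(task="zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")
#   waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
#   classifier(waveform, candidate_labels=["speech", "silence"])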
| 26 | 1 |
import numpy as np
def UpperCAmelCase ( _lowerCamelCase : np.ndarray , _lowerCamelCase : np.ndarray , _lowerCamelCase : float = 1E-12 , _lowerCamelCase : int = 100 , ):
    '''simple docstring'''
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    SCREAMING_SNAKE_CASE__ : List[Any] = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    SCREAMING_SNAKE_CASE__ : Optional[int] = False
    SCREAMING_SNAKE_CASE__ : List[Any] = 0
    SCREAMING_SNAKE_CASE__ : Dict = 0
    SCREAMING_SNAKE_CASE__ : List[Any] = 1E12
    while not convergence:
        # Multiply matrix by the vector.
        SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        SCREAMING_SNAKE_CASE__ : List[str] = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        SCREAMING_SNAKE_CASE__ : Tuple = vector.conj().T if is_complex else vector.T
        SCREAMING_SNAKE_CASE__ : int = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        SCREAMING_SNAKE_CASE__ : int = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            SCREAMING_SNAKE_CASE__ : Any = True
        SCREAMING_SNAKE_CASE__ : List[Any] = lambda_
    if is_complex:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.real(lambda_ )
return lambda_, vector
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
SCREAMING_SNAKE_CASE__ : Any = np.array([41, 4, 20] )
SCREAMING_SNAKE_CASE__ : Optional[int] = real_input_matrix.astype(np.complexaaa )
SCREAMING_SNAKE_CASE__ : int = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
SCREAMING_SNAKE_CASE__ : Optional[Any] = real_input_matrix
SCREAMING_SNAKE_CASE__ : List[str] = real_vector
elif problem_type == "complex":
SCREAMING_SNAKE_CASE__ : Any = complex_input_matrix
SCREAMING_SNAKE_CASE__ : Tuple = complex_vector
# Our implementation.
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = power_iteration(input_matrix , vector )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = np.linalg.eigh(input_matrix )
# Last eigenvalue is the maximum one.
SCREAMING_SNAKE_CASE__ : Any = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
SCREAMING_SNAKE_CASE__ : Optional[int] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
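# Usage sketch (added; it follows this file's own convention of invoking the
# routine above as ``power_iteration``): the dominant eigenpair of a symmetric
# matrix is recovered from any starting vector not orthogonal to the dominant
# eigenvector.
#
#   A = np.array([[2.0, 0.0], [0.0, 1.0]])
#   v0 = np.array([1.0, 1.0])
#   eigen_value, eigen_vector = power_iteration(A, v0)
#   # eigen_value -> 2.0 (up to tolerance); eigen_vector -> ±[1, 0]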
| 700 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = False
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
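# Recap of the registration pattern the tests above exercise (a sketch;
# CustomConfig / CustomFeatureExtractor / CustomTokenizer / CustomProcessor are
# test-local fixture classes, not public library classes):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#   processor = AutoProcessor.from_pretrained(path_to_saved_custom_processor)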
| 26 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Register quant_trainer's command-line arguments on the given parser."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set QuantLinear's default input/weight quantization descriptors from the CLI arguments."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    """Configure all quantizers in the model before training or calibration."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Switch every *_quantizer module from quantization to calibration mode."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load the collected amax values into each quantizer."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Use a single scale factor for Q, K and V by taking the max of their amaxes."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clamp the input-quantizer amax of every GELU-fed output.dense layer to maxval."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand a per-tensor amax to per-channel, each channel receiving the per-tensor value."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            num_channels = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(num_channels, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Perform max calibration on the weights and update each quantizer's amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print one row per layer with a weight: name, activation quantizer, weight quantizer."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        row = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(row) <= line_width:
            logger.info(row)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute `k` of mod's named quantizer (_input_quantizer/_weight_quantizer) to `v`."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod; `which` selects the input, weight or both quantizers."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for every module whose name matches a regex in `names`."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
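# A minimal sketch of the intended call order (``model`` and the argparse
# parser come from the surrounding training script; everything else is defined
# in this module):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args()
#   set_default_quantizers(args)              # before the model is built
#   ... build model ...
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   ... run a few forward passes on calibration data ...
#   finish_calibration(model, args)           # loads amax, re-enables quantization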
| 701 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
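# A minimal usage sketch (the checkpoint name and image file below are
# illustrative assumptions, not part of this module):
#
#   from transformers import CLIPProcessor
#   from PIL import Image
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                      return_tensors="pt", padding=True)
#   # -> dict with input_ids, attention_mask and pixel_values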
| 26 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds small LiLT configs and inputs for the model tests below."""
def __init__( self : Optional[int] , a : Optional[int] , a : List[Any]=13 , a : Optional[int]=7 , a : List[str]=True , a : int=True , a : List[Any]=True , a : Optional[int]=True , a : List[str]=99 , a : Dict=24 , a : List[str]=2 , a : Optional[Any]=6 , a : int=37 , a : Optional[Any]="gelu" , a : Optional[Any]=0.1 , a : List[Any]=0.1 , a : Any=5_12 , a : str=16 , a : List[str]=2 , a : Optional[int]=0.02 , a : Tuple=3 , a : Union[str, Any]=None , a : Any=10_00 , ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : Any = seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : int = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = scope
SCREAMING_SNAKE_CASE__ : Any = range_bbox
def A_ ( self : Tuple ) ->int:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 1]
SCREAMING_SNAKE_CASE__ : Dict = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE__ : List[str] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox[i, j, 0]
SCREAMING_SNAKE_CASE__ : List[Any] = t
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A_ ( self : int ) ->Optional[int]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A_ ( self : Any , a : Union[str, Any] , a : Any , a : Dict , a : int , a : Union[str, Any] , a : Optional[Any] , a : Optional[int] , ) ->str:
SCREAMING_SNAKE_CASE__ : Dict = LiltModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Any = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : int = model(UpperCAmelCase_ , bbox=UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self : int , a : str , a : Any , a : Tuple , a : Union[str, Any] , a : Optional[int] , a : str , a : List[Any] , ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LiltForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Any , a : List[str] , a : Dict , a : str , a : List[str] , a : int , a : List[Any] , a : List[str] , ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = LiltForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(
UpperCAmelCase_ , bbox=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : int ) ->Optional[int]:
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = config_and_inputs
    inputs_dict = {
        "input_ids": input_ids,
        "bbox": bbox,
        "token_type_ids": token_type_ids,
        "attention_mask": input_mask,
    }
    return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-tester harness for LiLT."""
snake_case_ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
def A_ ( self : str , a : int , a : List[str] , a : Dict , a : int , a : Optional[int] ) ->Optional[int]:
return True
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = LiltModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def A_ ( self : str ) ->List[Any]:
self.config_tester.run_common_tests()
def A_ ( self : List[str] ) ->int:
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def A_ ( self : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : int = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def A_ ( self : List[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
@slow
def A_ ( self : Union[str, Any] ) ->Optional[int]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Dict = LiltModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_torch
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[int] ) ->Dict:
SCREAMING_SNAKE_CASE__ : List[Any] = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[1, 2]] , device=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(input_ids=UpperCAmelCase_ , bbox=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ : str = torch.Size([1, 2, 7_68] )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=UpperCAmelCase_ , )
self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase_ , atol=1E-3 ) )
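# Note on the integration test above: the expected hidden-state slice is a
# reference value presumably recorded once for the
# SCUT-DLVCLab/lilt-roberta-en-base checkpoint; the atol=1e-3 tolerance absorbs
# small numerical drift across devices.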
| 702 |
import sys
from collections import defaultdict
class Heap:
    """Min-heap keyed by tentative edge weight, with vertex -> heap-slot bookkeeping."""

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        # Sift the value at index `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_pos

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        # Sift a decreased key up towards the root.
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prims_algorithm(adjacency_list):
    """Return the edge list of a minimum spanning tree of the weighted graph."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
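# Non-interactive example (a sketch): for the weighted triangle 0-1 (weight 1),
# 1-2 (weight 2), 0-2 (weight 3), the MST keeps the two cheapest edges.
#
#   example_graph = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#       example_graph[u].append([v, w])
#       example_graph[v].append([u, w])
#   prims_algorithm(example_graph)  # -> [(0, 1), (1, 2)]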
| 26 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """Builds small TF-XGLM configs and inputs for the tests below."""
snake_case_ = XGLMConfig
snake_case_ = {}
snake_case_ = "gelu"
def __init__( self : int , a : Tuple , a : Optional[Any]=14 , a : Dict=7 , a : Union[str, Any]=True , a : List[Any]=True , a : Optional[int]=True , a : Dict=99 , a : Optional[int]=32 , a : Any=2 , a : Optional[int]=4 , a : List[str]=37 , a : Union[str, Any]="gelu" , a : List[str]=0.1 , a : Optional[Any]=0.1 , a : Tuple=5_12 , a : Any=0.02 , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = seq_length
SCREAMING_SNAKE_CASE__ : Dict = is_training
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : List[Any] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = d_model
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = ffn_dim
SCREAMING_SNAKE_CASE__ : List[str] = activation_function
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : List[str] = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : int = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def A_ ( self : Optional[Any] ) ->Union[str, Any]:
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def A_ ( self : Tuple ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def A_ ( self : Optional[int] ) ->int:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=A_ , )
def A_ ( self : Optional[int] ) ->Tuple:
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        input_mask,
        head_mask,
    ) = config_and_inputs
    inputs_dict = {
        "input_ids": input_ids,
        "head_mask": head_mask,
    }
    return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard TF model-tester harness for XGLM."""
snake_case_ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
snake_case_ = (TFXGLMForCausalLM,) if is_tf_available() else ()
snake_case_ = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Optional[Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[str] = TFXGLMModelTester(self )
SCREAMING_SNAKE_CASE__ : Any = ConfigTester(self , config_class=A_ , n_embd=37 )
def A_ ( self : List[str] ) ->Optional[int]:
self.config_tester.run_common_tests()
@slow
def A_ ( self : Optional[int] ) ->Dict:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[str] = TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def A_ ( self : List[Any] ) ->Any:
super().test_resize_token_embeddings()
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def A_ ( self : str , a : Any=True ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
SCREAMING_SNAKE_CASE__ : Any = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
SCREAMING_SNAKE_CASE__ : Dict = model.generate(A_ , do_sample=A_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , A_ )
@slow
def A_ ( self : Optional[int] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
SCREAMING_SNAKE_CASE__ : List[str] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer("Today is a nice day and" , return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : Dict = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
SCREAMING_SNAKE_CASE__ : List[str] = model.generate(A_ , do_sample=A_ , seed=[7, 0] )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=A_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(A_ , A_ )
@slow
def A_ ( self : Tuple ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
SCREAMING_SNAKE_CASE__ : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
SCREAMING_SNAKE_CASE__ : List[str] = "left"
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ : Dict = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(A_ , return_tensors="tf" , padding=A_ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["input_ids"]
SCREAMING_SNAKE_CASE__ : str = model.generate(input_ids=A_ , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE__ : Any = model.generate(input_ids=A_ , max_new_tokens=12 )
SCREAMING_SNAKE_CASE__ : str = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE__ : int = model.generate(input_ids=A_ , max_new_tokens=12 )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
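# Note: decoder-only models must be left-padded for batched generation, which
# is why the batched test above sets the tokenizer's padding side to "left"
# before comparing batched and unbatched outputs.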
| 703 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
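# A minimal sketch of how the two classes fit together (the sizes below are
# illustrative, not the defaults from this file):
#
#   config = LongformerConfig(attention_window=256, num_hidden_layers=6)
#   onnx_config = LongformerOnnxConfig(config, task="default")
#   onnx_config.inputs              # input_ids / attention_mask / global_attention_mask axes
#   onnx_config.default_onnx_opset  # >= 14, needed for the tril operator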
| 26 | 0 |
def is_palindrome(n):
    """Return True if `n` reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n):
    """Return `n` plus its digit-reversed counterpart."""
    return int(n) + int(str(n)[::-1])


def solution(limit=10_000):
    """Count Lychrel candidates below `limit` (Project Euler problem 55):
    numbers that do not form a palindrome within 50 reverse-and-add steps."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # while-else: no palindrome was reached within 50 iterations
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 704 |
def solution(n=4_000_000):
    """Sum the even-valued Fibonacci terms that do not exceed `n` (Project Euler problem 2)."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
| 26 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = VideoToVideoSDPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
snake_case_ = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case_ = False
# No `output_type`.
snake_case_ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def A_ ( self : int ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextModel(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A_ ( self : str , a : Optional[int] , a : Tuple=0 ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A_ ( self : Any ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[Any] = VideoToVideoSDPipeline(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = "np"
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**__lowerCamelCase ).frames
SCREAMING_SNAKE_CASE__ : Union[str, Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
SCREAMING_SNAKE_CASE__ : int = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A_ ( self : Dict ) ->int:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCamelCase , expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A_ ( self : Tuple ) ->Dict:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A_ ( self : Tuple ) ->int:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A_ ( self : Dict ) ->List[Any]:
pass
def A_ ( self : Union[str, Any] ) ->Dict:
return super().test_progress_bar()
@slow
@skip_mps
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Optional[int] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Any = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.randn((1, 10, 3, 10_24, 5_76) , generator=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = video.to("cuda" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "Spiderman is surfing"
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(__lowerCamelCase , video=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=3 , output_type="pt" ).frames
SCREAMING_SNAKE_CASE__ : Dict = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
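# Note: the slow test above uses enable_model_cpu_offload() so that the fp16
# zeroscope_v2_XL checkpoint can run without holding every submodule on the
# GPU at once; the 10-frame random tensor is only a smoke input, not a
# meaningful video clip.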
| 705 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
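# A standalone sketch (editorial, not part of the test suite) of the shortest-edge
# resize arithmetic implemented in get_expected_values above: scale the shorter
# side to `size`, cap the longer side at int((1333 / 800) * size), then round both
# sides down to a multiple of `size_divisor`.
def _expected_hw_sketch(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor
# e.g. _expected_hw_sketch(400, 640) == (288, 448)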
@require_torch
@require_vision
class _a ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__lowercase :List[Any] = None
__lowercase :int = logging.get_logger(__name__)
__lowercase :List[str] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__lowercase :List[Any] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
__lowercase :Any = {
"google/bigbird-roberta-base": 4_096,
"google/bigbird-roberta-large": 4_096,
"google/bigbird-base-trivia-itc": 4_096,
}
__lowercase :Tuple = "▁"
class _a ( PreTrainedTokenizerFast ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = BigBirdTokenizer
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = []
def __init__( self : int , a : List[Any]=None , a : Optional[Any]=None , a : List[str]="<unk>" , a : Tuple="<s>" , a : Tuple="</s>" , a : Optional[int]="<pad>" , a : Optional[Any]="[SEP]" , a : List[Any]="[MASK]" , a : Any="[CLS]" , **a : Tuple , ) ->str:
SCREAMING_SNAKE_CASE__ : str = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else bos_token
SCREAMING_SNAKE_CASE__ : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else eos_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else unk_token
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else pad_token
SCREAMING_SNAKE_CASE__ : Dict = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else cls_token
SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ : int = vocab_file
SCREAMING_SNAKE_CASE__ : Tuple = False if not self.vocab_file else True
def A_ ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A_ ( self : Tuple , a : List[int] , a : Optional[List[int]] = None , a : bool = False ) ->Dict:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def A_ ( self : Optional[int] , a : List[int] , a : Optional[List[int]] = None ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_ ( self : Optional[int] , a : str , a : Optional[str] = None ) ->Union[str, Any]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
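# Illustrative sketch (editorial) of the special-token layout the methods above
# produce, using hypothetical token ids: a single sequence becomes [CLS] A [SEP],
# a pair becomes [CLS] A [SEP] B [SEP], and token_type_ids mark the first segment
# (including its [SEP]) with 0 and the second with 1.
cls_id, sep_id = 65, 66  # hypothetical ids, not BigBird's real vocabulary
seq_a, seq_b = [10, 11, 12], [20, 21]
pair_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
pair_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
assert len(pair_ids) == len(pair_type_ids) == 8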
| 706 |
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
SCREAMING_SNAKE_CASE__ : List[str] = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowerCamelCase , 1 ):
if n < _p:
# then we have our last prime to check
SCREAMING_SNAKE_CASE__ : Dict = primes[:idx]
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
SCREAMING_SNAKE_CASE__ : str = False
for r in range(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pow(_lowerCamelCase , d * 2**r , _lowerCamelCase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
SCREAMING_SNAKE_CASE__ : str = True
# this loop will not determine compositeness
break
if pr:
continue
        # if pr is False, then the above loop never evaluated to true,
        # and n MUST be composite
return False
return True
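# Worked illustration (editorial) of the d * 2**s decomposition above, for n = 221:
# n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2. Witness base 2 then evaluates
# pow(2, 55 * 2**r, 221) for r in range(2), giving 128 and 30; neither equals 1 at
# r == 0 nor n - 1, so 221 (= 13 * 17) is correctly reported composite.
_n_demo = 221
_d_demo, _s_demo = _n_demo - 1, 0
while _d_demo % 2 == 0:
    _d_demo //= 2
    _s_demo += 1
assert (_d_demo, _s_demo) == (55, 2) and _d_demo * 2**_s_demo == _n_demo - 1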
def UpperCAmelCase ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :int = logging.get_logger(__name__)
__lowercase :int = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class _a ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = 'gpt_bigcode'
snake_case_ = ['past_key_values']
snake_case_ = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[str] , a : str=5_02_57 , a : List[Any]=10_24 , a : Dict=7_68 , a : Union[str, Any]=12 , a : Union[str, Any]=12 , a : Tuple=None , a : int="gelu_pytorch_tanh" , a : Dict=0.1 , a : Dict=0.1 , a : Optional[int]=0.1 , a : Optional[int]=1E-5 , a : int=0.02 , a : Optional[int]=True , a : Tuple=True , a : Union[str, Any]=5_02_56 , a : str=5_02_56 , a : Optional[int]=True , a : List[Any]=True , a : List[str]=True , **a : Union[str, Any] , ) ->Any:
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : str = n_positions
SCREAMING_SNAKE_CASE__ : List[str] = n_embd
SCREAMING_SNAKE_CASE__ : str = n_layer
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_head
SCREAMING_SNAKE_CASE__ : Optional[int] = n_inner
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_function
SCREAMING_SNAKE_CASE__ : Dict = resid_pdrop
SCREAMING_SNAKE_CASE__ : str = embd_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = scale_attn_weights
SCREAMING_SNAKE_CASE__ : Any = use_cache
SCREAMING_SNAKE_CASE__ : List[str] = attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : Any = scale_attention_softmax_in_fpaa
SCREAMING_SNAKE_CASE__ : Optional[Any] = multi_query
SCREAMING_SNAKE_CASE__ : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
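# Editorial note: the attribute_map above aliases the canonical PretrainedConfig
# attribute names onto the GPT-2 style field names, so e.g. config.hidden_size
# transparently resolves to config.n_embd. A minimal sketch of that mechanism:
class _AliasDemo:
    attribute_map = {"hidden_size": "n_embd"}
    n_embd = 768
    def __getattr__(self, name):
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)
assert _AliasDemo().hidden_size == 768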
| 707 |
import numpy
class _a :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
def A_ ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A_ ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A_ ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def A_ ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( _lowerCamelCase : numpy.ndarray ):
'''simple docstring'''
return (value) * (1 - (value))
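# Clarifying note (editorial): sigmoid_derivative above expects an *already
# activated* value, i.e. it computes a * (1 - a) with a = sigmoid(x), rather than
# differentiating with respect to x directly. Quick numerical check:
_x_demo = numpy.array([-2.0, 0.0, 3.0])
_a_demo = 1 / (1 + numpy.exp(-_x_demo))
assert numpy.allclose(_a_demo * (1 - _a_demo), numpy.exp(-_x_demo) / (1 + numpy.exp(-_x_demo)) ** 2)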
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :Optional[int] = logging.get_logger()
@dataclass
class _a :
"""simple docstring"""
snake_case_ = 42
snake_case_ = field(default_factory=__A )
snake_case_ = field(default_factory=__A )
def A_ ( self : Any , a : Optional[int] , a : Tensor , a : Tensor ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Any = len(list(m.modules() ) ) == 1 or isinstance(a , nn.Convad ) or isinstance(a , nn.BatchNormad )
if has_not_submodules:
self.traced.append(a )
def __call__( self : Tuple , a : Tensor ) ->int:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a )
[x.remove() for x in self.handles]
return self
@property
def A_ ( self : Dict ) ->Optional[Any]:
return list(filter(lambda a : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
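# Minimal sketch (editorial) of the forward-hook tracing idea behind Tracker above:
# hook every leaf module, run a single forward pass to record execution order,
# then detach the hooks again.
def _trace_leaves_sketch(model: nn.Module, x: Tensor) -> list:
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)
    for handle in handles:
        handle.remove()
    return traced
# e.g. _trace_leaves_sketch(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()), torch.randn(1, 3, 8, 8))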
@dataclass
class _a :
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 1
snake_case_ = field(default_factory=__A )
snake_case_ = field(default_factory=__A )
snake_case_ = True
def __call__( self : str , a : Tensor ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : str = Tracker(self.dest )(a ).parametrized
SCREAMING_SNAKE_CASE__ : Optional[int] = Tracker(self.src )(a ).parametrized
SCREAMING_SNAKE_CASE__ : Any = list(filter(lambda a : type(a ) not in self.src_skip , a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = list(filter(lambda a : type(a ) not in self.dest_skip , a ) )
if len(a ) != len(a ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(a )} operations while"""
f""" destination module has {len(a )}.""" )
for dest_m, src_m in zip(a , a ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class _a ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , a : nn.Module ) ->str:
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f"""Unexpected layer name {k}"""
SCREAMING_SNAKE_CASE__ : List[Any] = len(a ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.ModuleDict(a )
def A_ ( self : Tuple , a : Tensor ) ->List[Any]:
return get_trunk_forward_outputs(
a , out_feat_keys=a , feature_blocks=self._feature_blocks , )
class _a ( __A ):
"""simple docstring"""
def A_ ( self : Union[str, Any] , a : str ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[int] = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Dict , a : str ) ->Callable[[], Tuple[nn.Module, Dict]]:
if x not in self:
SCREAMING_SNAKE_CASE__ : List[Any] = self.convert_name_to_timm(a )
SCREAMING_SNAKE_CASE__ : int = partial(lambda: (timm.create_model(a , pretrained=a ).eval(), None) )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__getitem__(a )
return val
class _a ( __A ):
"""simple docstring"""
def __getitem__( self : List[str] , a : str ) ->Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
SCREAMING_SNAKE_CASE__ : Tuple = RegNetModel
else:
SCREAMING_SNAKE_CASE__ : Any = RegNetForImageClassification
return val
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
SCREAMING_SNAKE_CASE__ : str = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Callable[[], nn.Module] , _lowerCamelCase : Callable[[], nn.Module] , _lowerCamelCase : RegNetConfig , _lowerCamelCase : Path , _lowerCamelCase : bool = True , ):
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = from_model_func()
SCREAMING_SNAKE_CASE__ : str = our_model_func(__A ).eval()
SCREAMING_SNAKE_CASE__ : str = ModuleTransfer(src=__A , dest=__A , raise_if_mismatch=__A )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn((1, 3, 224, 224) )
module_transfer(__A )
if from_state_dict is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : int = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')]
SCREAMING_SNAKE_CASE__ : str = manually_copy_vissl_head(__A , our_model.state_dict() , __A )
our_model.load_state_dict(__A )
SCREAMING_SNAKE_CASE__ : int = our_model(__A , output_hidden_states=__A )
SCREAMING_SNAKE_CASE__ : Dict = (
our_outputs.logits if isinstance(__A , __A ) else our_outputs.last_hidden_state
)
SCREAMING_SNAKE_CASE__ : Optional[int] = from_model(__A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = from_output[-1] if type(__A ) is list else from_output
    # now since I don't want to use any config files, the vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = our_outputs.hidden_states[-1]
assert torch.allclose(__A , __A ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__A , )
SCREAMING_SNAKE_CASE__ : Optional[int] = 224 if '''seer''' not in name else 384
# we can use the convnext one
SCREAMING_SNAKE_CASE__ : Tuple = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__A )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__A , )
print(f"""Pushed {name}""" )
def UpperCAmelCase ( _lowerCamelCase : Path , _lowerCamelCase : str = None , _lowerCamelCase : bool = True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE__ : int = 1_000
SCREAMING_SNAKE_CASE__ : str = (1, num_labels)
SCREAMING_SNAKE_CASE__ : List[str] = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ : Optional[int] = num_labels
SCREAMING_SNAKE_CASE__ : int = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type="dataset" ) ) , "r" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = {int(__A ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Optional[Any] = idalabel
SCREAMING_SNAKE_CASE__ : Optional[int] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Any = partial(__A , num_labels=__A , idalabel=__A , labelaid=__A )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type="x" ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type="x" ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type="x" ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type="x" ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type="x" ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type="x" ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type="x" ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
SCREAMING_SNAKE_CASE__ : List[Any] = NameToOurModelFuncMap()
SCREAMING_SNAKE_CASE__ : Any = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(_lowerCamelCase : str , _lowerCamelCase : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
SCREAMING_SNAKE_CASE__ : Any = torch.hub.load_state_dict_from_url(__A , model_dir=str(__A ) , map_location="cpu" )
SCREAMING_SNAKE_CASE__ : Dict = model_func()
# check if we have a head, if yes add it
SCREAMING_SNAKE_CASE__ : int = files['''classy_state_dict''']['''base_model''']['''model''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_state_dict['''trunk''']
model.load_state_dict(__A )
return model.eval(), model_state_dict["heads"]
# pretrained
SCREAMING_SNAKE_CASE__ : Tuple = partial(
__A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
__A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : List[str] = partial(
__A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : str = partial(
__A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
# IN1K finetuned
SCREAMING_SNAKE_CASE__ : Dict = partial(
__A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Any = partial(
__A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
SCREAMING_SNAKE_CASE__ : Dict = partial(
__A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = partial(
__A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , )
if model_name:
convert_weight_and_push(
__A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __A , __A , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __A , __A , __A , )
return config, expected_shape
if __name__ == "__main__":
__lowercase :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
__lowercase :int = parser.parse_args()
__lowercase :Any = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 708 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def A_ ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def A_ ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
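# Illustrative sentence-level sketch (editorial, not the nltk implementation used
# above) of the GLEU idea from the description: pool all 1..4-gram counts, clip
# matches against the reference, and take min(precision, recall).
from collections import Counter
def _ngram_counts(tokens, lo=1, hi=4):
    return Counter(tuple(tokens[i : i + n]) for n in range(lo, hi + 1) for i in range(len(tokens) - n + 1))
def _sentence_gleu_sketch(hyp, ref, lo=1, hi=4):
    h, r = _ngram_counts(hyp, lo, hi), _ngram_counts(ref, lo, hi)
    matches = sum((h & r).values())
    return min(matches / sum(h.values()), matches / sum(r.values()))
assert _sentence_gleu_sketch(["the", "cat", "sat"], ["the", "cat", "sat"]) == 1.0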
| 26 | 0 |
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
    if _lowerCamelCase <= 0 or not isinstance(_lowerCamelCase , int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
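    # Editorial note: with range(length) the series starts at n = 0, so the first
    # print yields [0, 1, 6, 15, 28]; the classic hexagonal numbers 1, 6, 15, 28, 45
    # correspond to n = 1..5 in n * (2 * n - 1).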
| 709 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
require_version(deps[pkg] , _lowerCamelCase )
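# Usage sketch (editorial): require_version checks an installed distribution
# against a PEP 440 style requirement string and raises with an optional hint on
# mismatch, e.g.:
#   require_version("tokenizers>=0.11.1", "pip install -U tokenizers")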
| 26 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowercase :str = "src/diffusers"
__lowercase :Dict = "."
# This is to make sure the diffusers module imported is the one in the repo.
__lowercase :Any = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
__lowercase :Optional[Any] = spec.loader.load_module()
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return line.startswith(lowerCamelCase__ ) or len(lowerCamelCase__ ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , lowerCamelCase__ ) is not None
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = object_name.split("." )
SCREAMING_SNAKE_CASE__ : List[Any] = 0
# First let's find the module where our object lives.
SCREAMING_SNAKE_CASE__ : List[Any] = parts[i]
while i < len(lowerCamelCase__ ) and not os.path.isfile(os.path.join(lowerCamelCase__ , f"""{module}.py""" ) ):
i += 1
if i < len(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(lowerCamelCase__ , parts[i] )
if i >= len(lowerCamelCase__ ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(lowerCamelCase__ , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE__ : List[str] = f.readlines()
# Now let's find the class / func in the code!
SCREAMING_SNAKE_CASE__ : Optional[Any] = ""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCamelCase__ ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCamelCase__ ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
SCREAMING_SNAKE_CASE__ : Any = line_index
while line_index < len(lowerCamelCase__ ) and _should_continue(lines[line_index] , lowerCamelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ : Any = lines[start_index:line_index]
return "".join(lowerCamelCase__ )
__lowercase :List[Any] = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
__lowercase :Tuple = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
__lowercase :Tuple = re.compile(R"<FILL\s+[^>]*>")
def UpperCAmelCase ( _lowerCamelCase : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = code.split("\n" )
SCREAMING_SNAKE_CASE__ : Any = 0
while idx < len(lowerCamelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCamelCase__ ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = len(get_indent(lowerCamelCase__ ) ) > 0
if has_indent:
SCREAMING_SNAKE_CASE__ : Tuple = f"""class Bla:\n{code}"""
SCREAMING_SNAKE_CASE__ : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = style_docstrings_in_code(lowerCamelCase__ )
return result[len("class Bla:\n" ) :] if has_indent else result
def UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict=False ):
'''simple docstring'''
with open(lowerCamelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = f.readlines()
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
SCREAMING_SNAKE_CASE__ : Any = search.groups()
SCREAMING_SNAKE_CASE__ : List[Any] = find_code_in_diffusers(lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_indent(lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = line_index + 1 if indent == theoretical_indent else line_index + 2
SCREAMING_SNAKE_CASE__ : str = theoretical_indent
SCREAMING_SNAKE_CASE__ : Any = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
SCREAMING_SNAKE_CASE__ : Any = True
while line_index < len(lowerCamelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCamelCase__ ):
break
SCREAMING_SNAKE_CASE__ : Optional[int] = lines[line_index]
SCREAMING_SNAKE_CASE__ : int = _should_continue(lowerCamelCase__ , lowerCamelCase__ ) and re.search(f"""^{indent}# End copy""" , lowerCamelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ : Dict = lines[start_index:line_index]
SCREAMING_SNAKE_CASE__ : List[Any] = "".join(lowerCamelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
SCREAMING_SNAKE_CASE__ : str = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(lowerCamelCase__ ) is None]
SCREAMING_SNAKE_CASE__ : int = "\n".join(lowerCamelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCamelCase__ ) > 0:
SCREAMING_SNAKE_CASE__ : Tuple = replace_pattern.replace("with" , "" ).split("," )
SCREAMING_SNAKE_CASE__ : int = [_re_replace_pattern.search(lowerCamelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
SCREAMING_SNAKE_CASE__ : Dict = pattern.groups()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if option.strip() == "all-casing":
SCREAMING_SNAKE_CASE__ : Optional[int] = re.sub(obja.lower() , obja.lower() , lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : str = re.sub(obja.upper() , obja.upper() , lowerCamelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
SCREAMING_SNAKE_CASE__ : List[Any] = blackify(lines[start_index - 1] + theoretical_code )
SCREAMING_SNAKE_CASE__ : Any = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
SCREAMING_SNAKE_CASE__ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
SCREAMING_SNAKE_CASE__ : Optional[Any] = start_index + 1
if overwrite and len(lowerCamelCase__ ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowerCamelCase__ )
return diffs
def check_copies ( overwrite : bool = False ):
    '''simple docstring'''
    # DIFFUSERS_PATH is the module-level repo-path constant defined earlier in the source file (outside this excerpt).
    SCREAMING_SNAKE_CASE__ : Optional[int] = glob.glob(os.path.join(DIFFUSERS_PATH , "**/*.py" ) , recursive=True )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
    for filename in all_files:
        SCREAMING_SNAKE_CASE__ : Tuple = is_copy_consistent(filename , overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        SCREAMING_SNAKE_CASE__ : Dict = "\n".join(diffs )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
__lowercase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__lowercase :Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 710 |
from __future__ import annotations
def max_sum_in_array ( array : list[int] , k : int ):
    '''simple docstring'''
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    current_sum = sum(array[:k] )
    max_sum = current_sum
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
"""simple docstring"""
snake_case_ = PegasusConfig
snake_case_ = {}
snake_case_ = "gelu"
def __init__( self : str , a : Tuple , a : Optional[Any]=13 , a : Tuple=7 , a : List[str]=True , a : Any=False , a : Tuple=99 , a : List[Any]=32 , a : Union[str, Any]=2 , a : Dict=4 , a : int=37 , a : Optional[Any]=0.1 , a : Optional[Any]=0.1 , a : Optional[int]=40 , a : List[str]=2 , a : List[str]=1 , a : List[Any]=0 , ) ->str:
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = seq_length
SCREAMING_SNAKE_CASE__ : Tuple = is_training
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = pad_token_id
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bos_token_id
def A_ ( self : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ : Any = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE__ : str = prepare_pegasus_inputs_dict(__A , __A , __A )
return config, inputs_dict
def A_ ( self : Optional[Any] , a : int , a : int ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Any = TFPegasusModel(config=__A ).get_decoder()
SCREAMING_SNAKE_CASE__ : List[str] = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE__ : str = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ : Any = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE__ : Tuple = inputs_dict["head_mask"]
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
SCREAMING_SNAKE_CASE__ : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__ : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__ : Dict = model(__A , attention_mask=__A )[0]
SCREAMING_SNAKE_CASE__ : List[str] = model(__A , attention_mask=__A , past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A , __A , rtol=1E-3 )
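# What the check above asserts (illustrative sketch): decoding appended tokens with cached
# past_key_values must numerically match re-running the decoder on the full concatenated
# sequence, compared on a random output slice:
#   full = model(concat(input_ids, next_tokens))[0][:, -3:, idx]
#   step = model(next_tokens, past_key_values=past)[0][:, :, idx]
#   tf.debugging.assert_near(full, step, rtol=1e-3)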
def prepare_pegasus_inputs_dict ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
'''simple docstring'''
if attention_mask is None:
        SCREAMING_SNAKE_CASE__ : List[str] = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
snake_case_ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
snake_case_ = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
def A_ ( self : Tuple ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Tuple = TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=__A )
def A_ ( self : int ) ->Optional[Any]:
self.config_tester.run_common_tests()
def A_ ( self : Dict ) ->List[str]:
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
snake_case_ = [
"California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
snake_case_ = "google/pegasus-xsum"
@cached_property
def A_ ( self : Union[str, Any] ) ->List[str]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A_ ( self : Tuple ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A_ ( self : List[Any] , **a : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.translate_src_text(**__A )
assert self.expected_text == generated_words
def A_ ( self : str , **a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(self.src_text , **__A , padding=__A , return_tensors="tf" )
SCREAMING_SNAKE_CASE__ : Any = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__A , )
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__A )
return generated_words
@slow
def A_ ( self : int ) ->Any:
self._assert_generated_batch_equal_expected()
| 711 |
from __future__ import annotations
def find_max ( nums : list[int | float] , left : int , right : int ):
    '''simple docstring'''
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
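# Worked trace (illustrative): find_max([3, 9, 1], 0, 2)
#   mid = (0 + 2) >> 1 = 1
#   left_max  = find_max([3, 9, 1], 0, 1) -> max(3, 9) = 9
#   right_max = find_max([3, 9, 1], 2, 2) -> 1
#   result    = 9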
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__lowercase :Any = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__lowercase :Any = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__lowercase :Dict = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__lowercase :Union[str, Any] = {
"num_train_timesteps": 40,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
__lowercase :List[str] = {
"num_train_timesteps": 201,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
__lowercase :int = {
"num_train_timesteps": 151,
"sigma_min": 0.0_0_2,
"sigma_max": 8_0.0,
}
def strabool ( v ):
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected" )
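# Usage sketch for strabool (illustrative): it accepts bools unchanged and common
# truthy/falsy strings, which makes it usable as an argparse type converter:
#   strabool(True)  -> True
#   strabool("yes") -> True
#   strabool("0")   -> False
#   parser.add_argument("--class_cond", type=strabool, default=True)  # hypothetical wiring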
def convert_resnet ( checkpoint : dict , new_checkpoint : dict , old_prefix : str , new_prefix : str , has_skip : bool=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
SCREAMING_SNAKE_CASE__ : str = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
SCREAMING_SNAKE_CASE__ : List[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
SCREAMING_SNAKE_CASE__ : Optional[int] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
SCREAMING_SNAKE_CASE__ : str = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
SCREAMING_SNAKE_CASE__ : str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
SCREAMING_SNAKE_CASE__ : Tuple = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
SCREAMING_SNAKE_CASE__ : Tuple = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
SCREAMING_SNAKE_CASE__ : Tuple = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
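# Hedged sketch of the intended remapping (target key names are assumptions, not verified
# against diffusers): each `{old_prefix}.<orig name>` tensor above is meant to land under a
# matching `{new_prefix}.<diffusers name>` key in new_checkpoint, e.g.
#   "input_blocks.1.0.in_layers.0.weight" -> "down_blocks.0.resnets.0.norm1.weight"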
def convert_attention ( checkpoint : dict , new_checkpoint : dict , old_prefix : str , new_prefix : str , attention_dim=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
SCREAMING_SNAKE_CASE__ : Dict = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
SCREAMING_SNAKE_CASE__ : Dict = checkpoint[f"""{old_prefix}.norm.weight"""]
SCREAMING_SNAKE_CASE__ : Any = checkpoint[f"""{old_prefix}.norm.bias"""]
SCREAMING_SNAKE_CASE__ : str = weight_q.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Tuple = bias_q.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Dict = weight_k.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : int = bias_k.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : str = weight_v.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Dict = bias_v.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
SCREAMING_SNAKE_CASE__ : Dict = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
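# Shape sketch (illustrative): with channel width C, checkpoint[f"{old_prefix}.qkv.weight"]
# is a fused (3*C, C, 1, 1) 1x1-conv weight; .chunk(3, dim=0) splits it into query/key/value
# blocks of shape (C, C, 1, 1), and each .squeeze(-1).squeeze(-1) drops the spatial dims so
# the result can be stored as plain (C, C) projection matrices.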
def con_pt_to_diffuser ( checkpoint_path : str , unet_config : dict ):
    '''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.load(checkpoint_path , map_location="cpu" )
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : Any = checkpoint["""time_embed.0.weight"""]
SCREAMING_SNAKE_CASE__ : str = checkpoint["""time_embed.0.bias"""]
SCREAMING_SNAKE_CASE__ : int = checkpoint["""time_embed.2.weight"""]
SCREAMING_SNAKE_CASE__ : List[Any] = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
SCREAMING_SNAKE_CASE__ : Dict = checkpoint["""label_emb.weight"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint["""input_blocks.0.0.weight"""]
SCREAMING_SNAKE_CASE__ : Dict = checkpoint["""input_blocks.0.0.bias"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = unet_config["""down_block_types"""]
SCREAMING_SNAKE_CASE__ : List[str] = unet_config["""layers_per_block"""]
SCREAMING_SNAKE_CASE__ : Optional[int] = unet_config["""attention_head_dim"""]
SCREAMING_SNAKE_CASE__ : str = unet_config["""block_out_channels"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE__ : Any = channels_list[0]
for i, layer_type in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = channels_list[i]
SCREAMING_SNAKE_CASE__ : List[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Any = f"""down_blocks.{i}.resnets.{j}"""
SCREAMING_SNAKE_CASE__ : Tuple = f"""input_blocks.{current_layer}.0"""
SCREAMING_SNAKE_CASE__ : str = True if j == 0 and downsample_block_has_skip else False
SCREAMING_SNAKE_CASE__ : Any = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_skip=__lowerCAmelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = f"""down_blocks.{i}.resnets.{j}"""
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""input_blocks.{current_layer}.0"""
SCREAMING_SNAKE_CASE__ : int = True if j == 0 and downsample_block_has_skip else False
SCREAMING_SNAKE_CASE__ : Optional[int] = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_skip=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = f"""down_blocks.{i}.attentions.{j}"""
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""input_blocks.{current_layer}.1"""
SCREAMING_SNAKE_CASE__ : List[str] = convert_attention(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
current_layer += 1
if i != len(__lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE__ : str = f"""down_blocks.{i}.downsamplers.0"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""input_blocks.{current_layer}.0"""
SCREAMING_SNAKE_CASE__ : List[Any] = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
current_layer += 1
SCREAMING_SNAKE_CASE__ : Tuple = current_channels
# hardcoded the mid-block for now
SCREAMING_SNAKE_CASE__ : Optional[Any] = """mid_block.resnets.0"""
SCREAMING_SNAKE_CASE__ : List[str] = """middle_block.0"""
SCREAMING_SNAKE_CASE__ : Any = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : str = """mid_block.attentions.0"""
SCREAMING_SNAKE_CASE__ : Tuple = """middle_block.1"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_attention(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = """mid_block.resnets.1"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """middle_block.2"""
SCREAMING_SNAKE_CASE__ : Optional[int] = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = unet_config["""up_block_types"""]
for i, layer_type in enumerate(__lowerCAmelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = f"""up_blocks.{i}.resnets.{j}"""
SCREAMING_SNAKE_CASE__ : str = f"""output_blocks.{current_layer}.0"""
SCREAMING_SNAKE_CASE__ : Tuple = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_skip=__lowerCAmelCase )
current_layer += 1
if i != len(__lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE__ : Any = f"""up_blocks.{i}.upsamplers.0"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""output_blocks.{current_layer-1}.1"""
SCREAMING_SNAKE_CASE__ : Any = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = f"""up_blocks.{i}.resnets.{j}"""
SCREAMING_SNAKE_CASE__ : Dict = f"""output_blocks.{current_layer}.0"""
SCREAMING_SNAKE_CASE__ : int = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , has_skip=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = f"""up_blocks.{i}.attentions.{j}"""
SCREAMING_SNAKE_CASE__ : List[str] = f"""output_blocks.{current_layer}.1"""
SCREAMING_SNAKE_CASE__ : str = convert_attention(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
current_layer += 1
if i != len(__lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE__ : List[Any] = f"""up_blocks.{i}.upsamplers.0"""
SCREAMING_SNAKE_CASE__ : List[str] = f"""output_blocks.{current_layer-1}.2"""
SCREAMING_SNAKE_CASE__ : Optional[int] = convert_resnet(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = checkpoint["""out.0.weight"""]
SCREAMING_SNAKE_CASE__ : Dict = checkpoint["""out.0.bias"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint["""out.2.weight"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
__lowercase :List[str] = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
__lowercase :Union[str, Any] = parser.parse_args()
__lowercase :int = strabool(args.class_cond)
__lowercase :int = os.path.basename(args.unet_path)
print(F"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
__lowercase :Any = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__lowercase :int = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__lowercase :Optional[int] = TEST_UNET_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
__lowercase :Any = None
__lowercase :Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
__lowercase :Dict = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__lowercase :Optional[int] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__lowercase :int = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__lowercase :List[str] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.")
__lowercase :List[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__lowercase :str = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 712 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def remove_last_block ( string : str ):
    '''simple docstring'''
    # `__lowercase` holds the EOF marker strings defined at the top of this file.
    SCREAMING_SNAKE_CASE__ : List[Any] = re.split("(%s)" % "|".join(__lowercase ) , string )
# last string should be ""
return "".join(string_list[:-2] )
def complete_code ( accelerator : Dict , model : Optional[Any] , tokenizer : Tuple , dataloader : str , n_tasks : int , batch_size : int=20 , **gen_kwargs : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def main ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__lowercase = logging.get_logger(__name__)
class _a :
"""simple docstring"""
def __init__( self : int , a : str = None , a : uuid.UUID = None , a : Optional[int]=None , a : Union[str, Any]=None ) ->Union[str, Any]:
if not conversation_id:
SCREAMING_SNAKE_CASE__ : List[str] = uuid.uuida()
if past_user_inputs is None:
SCREAMING_SNAKE_CASE__ : Dict = []
if generated_responses is None:
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : int = conversation_id
SCREAMING_SNAKE_CASE__ : Dict = past_user_inputs
SCREAMING_SNAKE_CASE__ : str = generated_responses
SCREAMING_SNAKE_CASE__ : Optional[Any] = text
def __eq__( self : str , a : Tuple ) ->Optional[int]:
if not isinstance(a , a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def A_ ( self : Tuple , a : str , a : bool = False ) ->Dict:
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = text
def A_ ( self : int ) ->str:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
def A_ ( self : Optional[int] , a : str ) ->Optional[Any]:
self.generated_responses.append(a )
def A_ ( self : int ) ->List[Any]:
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Any ) ->List[str]:
SCREAMING_SNAKE_CASE__ : List[Any] = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
SCREAMING_SNAKE_CASE__ : Optional[Any] = "user" if is_user else "bot"
output += f"""{name} >> {text} \n"""
return output
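# Usage sketch for the conversation container above (class `Conversation` upstream, `_a` in
# this excerpt; the upstream method names add_user_input / mark_processed / append_response
# have been renamed here):
#   conversation = Conversation("Hi there!")
#   conversation.mark_processed()           # moves the pending input to past_user_inputs
#   conversation.append_response("Hello!")  # records the bot reply
#   print(conversation)                     # "user >> Hi there!" / "bot >> Hello!"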
@add_end_docstrings(
_snake_case , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class _a ( _snake_case ):
"""simple docstring"""
def __init__( self : str , *a : Tuple , **a : str ) ->Union[str, Any]:
super().__init__(*a , **a )
if self.tokenizer.pad_token_id is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer.eos_token
def A_ ( self : List[str] , a : Union[str, Any]=None , a : Tuple=None , a : Union[str, Any]=None , **a : Optional[Any] ) ->str:
SCREAMING_SNAKE_CASE__ : Any = {}
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : Dict = {}
if min_length_for_response is not None:
SCREAMING_SNAKE_CASE__ : int = min_length_for_response
if minimum_tokens is not None:
SCREAMING_SNAKE_CASE__ : Any = minimum_tokens
if "max_length" in generate_kwargs:
SCREAMING_SNAKE_CASE__ : Dict = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , a : Union[Conversation, List[Conversation]] , a : Tuple=0 , **a : List[Any] ) ->Tuple:
SCREAMING_SNAKE_CASE__ : int = super().__call__(a , num_workers=a , **a )
if isinstance(a , a ) and len(a ) == 1:
return outputs[0]
return outputs
def A_ ( self : Union[str, Any] , a : Conversation , a : Optional[int]=32 ) ->int:
if not isinstance(a , a ):
raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer._build_conversation_input_ids(a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._legacy_parse_and_tokenize(a )
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : Any = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def A_ ( self : int , a : List[str] , a : Tuple=10 , **a : List[str] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = generate_kwargs.get("max_length" , self.model.config.max_length )
SCREAMING_SNAKE_CASE__ : int = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
SCREAMING_SNAKE_CASE__ : List[str] = max_length - minimum_tokens
SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
SCREAMING_SNAKE_CASE__ : str = model_inputs["attention_mask"][:, -trim:]
SCREAMING_SNAKE_CASE__ : Dict = model_inputs.pop("conversation" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_length
SCREAMING_SNAKE_CASE__ : int = self.model.generate(**a , **a )
if self.model.config.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ : List[Any] = 1
else:
SCREAMING_SNAKE_CASE__ : List[Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def A_ ( self : Optional[Any] , a : str , a : Union[str, Any]=True ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_outputs["output_ids"]
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
SCREAMING_SNAKE_CASE__ : int = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(a )
return conversation
def A_ ( self : Dict , a : Conversation ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(a , add_special_tokens=a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(a , add_special_tokens=a ) )
if len(a ) > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Any = input_ids[-self.tokenizer.model_max_length :]
return input_ids
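# Hedged end-to-end sketch using the upstream transformers API names:
#   from transformers import pipeline, Conversation
#   chatbot = pipeline("conversational")
#   conversation = Conversation("What's the weather like?")
#   conversation = chatbot(conversation)   # runs preprocess -> generate -> postprocess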
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
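# Sketch of what the lazy module buys (illustrative): importing the package is cheap, and
# torch-backed symbols are only resolved on first attribute access.
#   import importlib
#   mod = importlib.import_module("transformers.models.upernet")  # no torch import yet
#   cfg_cls = mod.UperNetConfig                                   # resolved lazily here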
| 26 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowercase :Optional[Any] = numpy.array([0, 0])
__lowercase :Optional[Any] = numpy.array([0.5, 0.8_6_6_0_2_5_4])
__lowercase :Any = numpy.array([1, 0])
__lowercase :List[str] = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate ( initial_vectors : list[numpy.ndarray] , steps : int ):
    '''simple docstring'''
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step ( vectors : list[numpy.ndarray] ):
    '''simple docstring'''
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate ( vector : numpy.ndarray , angle_in_degrees : float ):
    '''simple docstring'''
    theta = numpy.radians(angle_in_degrees )
    c, s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
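# Worked check (illustrative): rotate(numpy.array([1, 0]), 90) builds the matrix
#   [[cos 90, -sin 90], [sin 90, cos 90]] = [[0, -1], [1, 0]]
# and numpy.dot gives approximately [0, 1].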
def plot ( vectors : list[numpy.ndarray] ):
    '''simple docstring'''
    axes = plt.gca()
    axes.set_aspect("equal" )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase :List[Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 714 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds ( train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
    SCREAMING_SNAKE_CASE__ : int = features_name.pop(label_column_id )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
            SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding="max_length" ) , batched=True , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
            SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding="max_length" , ) , batched=True , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
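# Hedged usage sketch for get_tfds (CSV column names are hypothetical): given a file with
# columns "sentence" and "label", label_column_id selects the label column by position and
# the remaining column(s) are tokenized:
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv", eval_file=None, test_file=None,
#       tokenizer=tokenizer, label_column_id=1, max_seq_length=128)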
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def main ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 26 | 0 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
__lowercase :int = logging.get_logger(__name__)
# General docstring
__lowercase :List[Any] = "RegNetConfig"
# Base docstring
__lowercase :Union[str, Any] = "facebook/regnet-y-040"
__lowercase :Optional[int] = [1, 1_088, 7, 7]
# Image classification docstring
__lowercase :Dict = "facebook/regnet-y-040"
__lowercase :List[Any] = "tabby, tabby cat"
__lowercase :List[Any] = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[Any] , a : int , a : int = 3 , a : int = 1 , a : int = 1 , a : Optional[str] = "relu" , **a : str , ) ->Optional[Any]:
super().__init__(**_UpperCamelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
SCREAMING_SNAKE_CASE__ : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.layers.ConvaD(
filters=_UpperCamelCase , kernel_size=_UpperCamelCase , strides=_UpperCamelCase , padding="VALID" , groups=_UpperCamelCase , use_bias=_UpperCamelCase , name="convolution" , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ACTaFN[activation] if activation is not None else tf.identity
def A_ ( self : List[Any] , a : int ) ->int:
SCREAMING_SNAKE_CASE__ : List[Any] = self.convolution(self.padding(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = self.normalization(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.activation(_UpperCamelCase )
return hidden_state
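# Padding note (illustrative): ZeroPadding2D(kernel_size // 2) followed by a "VALID" Conv2D
# reproduces "SAME" padding for odd kernel sizes, so the spatial size only shrinks by the
# stride: out = ceil(in / stride).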
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[Any] , a : RegNetConfig , **a : Union[str, Any] ) ->Union[str, Any]:
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = config.num_channels
SCREAMING_SNAKE_CASE__ : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def A_ ( self : List[Any] , a : Dict ) ->Any:
SCREAMING_SNAKE_CASE__ : Optional[int] = shape_list(_UpperCamelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.transpose(_UpperCamelCase , perm=(0, 2, 3, 1) )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.embedder(_UpperCamelCase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , a : int , a : int = 2 , **a : List[Any] ) ->Dict:
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Any = tf.keras.layers.ConvaD(
filters=_UpperCamelCase , kernel_size=1 , strides=_UpperCamelCase , use_bias=_UpperCamelCase , name="convolution" )
SCREAMING_SNAKE_CASE__ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def A_ ( self : str , a : tf.Tensor , a : bool = False ) ->tf.Tensor:
return self.normalization(self.convolution(_UpperCamelCase ) , training=_UpperCamelCase )
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : List[Any] , a : int , a : int , **a : List[Any] ) ->Dict:
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCamelCase , name="pooler" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
tf.keras.layers.ConvaD(filters=_UpperCamelCase , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=_UpperCamelCase , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def A_ ( self : Dict , a : int ) ->Dict:
SCREAMING_SNAKE_CASE__ : str = self.pooler(_UpperCamelCase )
for layer_module in self.attention:
SCREAMING_SNAKE_CASE__ : str = layer_module(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = hidden_state * pooled
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , a : RegNetConfig , a : int , a : int , a : int = 1 , **a : List[str] ) ->List[Any]:
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE__ : Optional[int] = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE__ : Dict = (
TFRegNetShortCut(_UpperCamelCase , stride=_UpperCamelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
TFRegNetConvLayer(_UpperCamelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(_UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase , name="layer.2" ),
]
SCREAMING_SNAKE_CASE__ : Dict = ACTaFN[config.hidden_act]
def A_ ( self : Dict , a : Any ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE__ : Optional[Any] = layer_module(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.shortcut(_UpperCamelCase )
hidden_state += residual
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.activation(_UpperCamelCase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , a : RegNetConfig , a : int , a : int , a : int = 1 , **a : str ) ->Any:
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE__ : Any = (
TFRegNetShortCut(_UpperCamelCase , stride=_UpperCamelCase , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
SCREAMING_SNAKE_CASE__ : Any = [
TFRegNetConvLayer(_UpperCamelCase , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
_UpperCamelCase , stride=_UpperCamelCase , groups=_UpperCamelCase , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(_UpperCamelCase , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(_UpperCamelCase , kernel_size=1 , activation=_UpperCamelCase , name="layer.3" ),
]
SCREAMING_SNAKE_CASE__ : List[str] = ACTaFN[config.hidden_act]
def A_ ( self : List[str] , a : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : str = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE__ : Tuple = layer_module(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.shortcut(_UpperCamelCase )
hidden_state += residual
SCREAMING_SNAKE_CASE__ : Any = self.activation(_UpperCamelCase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , a : RegNetConfig , a : int , a : int , a : int = 2 , a : int = 2 , **a : int ) ->Optional[Any]:
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Any = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
SCREAMING_SNAKE_CASE__ : List[Any] = [
# downsampling is done in the first layer with stride of 2
layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , stride=_UpperCamelCase , name="layers.0" ),
*[layer(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def A_ ( self : Tuple , a : str ) ->int:
for layer_module in self.layers:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_module(_UpperCamelCase )
return hidden_state
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , a : RegNetConfig , **a : Any ) ->Any:
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_UpperCamelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
SCREAMING_SNAKE_CASE__ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_UpperCamelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , depth=_UpperCamelCase , name=f"""stages.{i+1}""" ) )
def A_ ( self : Union[str, Any] , a : tf.Tensor , a : bool = False , a : bool = True ) ->TFBaseModelOutputWithNoAttention:
SCREAMING_SNAKE_CASE__ : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE__ : Tuple = stage_module(_UpperCamelCase )
if output_hidden_states:
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_UpperCamelCase , hidden_states=_UpperCamelCase )
@keras_serializable
class _a ( tf.keras.layers.Layer ):
"""simple docstring"""
snake_case_ = RegNetConfig
def __init__( self : Optional[Any] , a : List[str] , **a : Any ) ->Tuple:
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = config
SCREAMING_SNAKE_CASE__ : Dict = TFRegNetEmbeddings(_UpperCamelCase , name="embedder" )
SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetEncoder(_UpperCamelCase , name="encoder" )
SCREAMING_SNAKE_CASE__ : str = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_UpperCamelCase , name="pooler" )
@unpack_inputs
def A_ ( self : Optional[int] , a : tf.Tensor , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
SCREAMING_SNAKE_CASE__ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ : Optional[int] = self.embedder(_UpperCamelCase , training=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : int = self.encoder(
_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , training=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = encoder_outputs[0]
SCREAMING_SNAKE_CASE__ : Any = self.pooler(_UpperCamelCase )
        # Change to NCHW output format to have uniformity across the modules
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.transpose(_UpperCamelCase , perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE__ : Any = tf.transpose(_UpperCamelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
SCREAMING_SNAKE_CASE__ : int = tuple([tf.transpose(_UpperCamelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCamelCase , pooler_output=_UpperCamelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _a ( __lowerCAmelCase ):
"""simple docstring"""
snake_case_ = RegNetConfig
snake_case_ = "regnet"
snake_case_ = "pixel_values"
@property
def A_ ( self : int ) ->Tuple:
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
__lowercase :Optional[int] = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
__lowercase :int = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __lowerCAmelCase , )
class _a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self : List[Any] , a : RegNetConfig , *a : int , **a : List[Any] ) ->List[str]:
super().__init__(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : int = TFRegNetMainLayer(_UpperCamelCase , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A_ ( self : List[Any] , a : tf.Tensor , a : Optional[bool] = None , a : Optional[bool] = None , a : Tuple=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
SCREAMING_SNAKE_CASE__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ : str = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ : List[str] = self.regnet(
pixel_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , training=_UpperCamelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __lowerCAmelCase , )
class _a ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
def __init__( self : str , a : RegNetConfig , *a : Optional[Any] , **a : List[Any] ) ->Dict:
super().__init__(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Any = config.num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFRegNetMainLayer(_UpperCamelCase , name="regnet" )
# classification head
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A_ ( self : Optional[int] , a : tf.Tensor = None , a : tf.Tensor = None , a : bool = None , a : bool = None , a : List[Any]=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
SCREAMING_SNAKE_CASE__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ : List[str] = self.regnet(
_UpperCamelCase , output_hidden_states=_UpperCamelCase , return_dict=_UpperCamelCase , training=_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE__ : Any = self.classifier[0](_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.classifier[1](_UpperCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = None if labels is None else self.hf_compute_loss(labels=_UpperCamelCase , logits=_UpperCamelCase )
if not return_dict:
SCREAMING_SNAKE_CASE__ : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_UpperCamelCase , logits=_UpperCamelCase , hidden_states=outputs.hidden_states )
| 715 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
| 26 | 0 |
'''simple docstring'''
import argparse
import struct
import unittest
class _a :
"""simple docstring"""
def __init__( self : List[str] , a : int ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = data
# Initialize hash values
SCREAMING_SNAKE_CASE__ : Any = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
# Initialize round constants
SCREAMING_SNAKE_CASE__ : str = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
SCREAMING_SNAKE_CASE__ : Any = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def A_ ( a : Tuple ) ->int:
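        # SHA-256 padding: append a single 0x80 byte, then zero bytes until the
        # length is 56 mod 64, and finally the original bit length as a 64-bit
        # big-endian integer, so the padded message is a multiple of 64 bytes.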
SCREAMING_SNAKE_CASE__ : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
SCREAMING_SNAKE_CASE__ : Any = struct.pack(">Q" , (len(_A ) * 8) )
return data + padding + big_endian_integer
def A_ ( self : List[Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
SCREAMING_SNAKE_CASE__ : int = list(struct.unpack(">16L" , _A ) )
            # extend with 48 zeroed words so the message schedule holds 64 entries
words += [0] * 48
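            # Message schedule: for i >= 16,
            # w[i] = (w[i - 16] + s0(w[i - 15]) + w[i - 7] + s1(w[i - 2])) mod 2**32,
            # where s0 and s1 are the rotate/shift mixing functions computed below.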
SCREAMING_SNAKE_CASE__ : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
                    # compute the remaining schedule words from the earlier entries
SCREAMING_SNAKE_CASE__ : List[str] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
SCREAMING_SNAKE_CASE__ : Tuple = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
SCREAMING_SNAKE_CASE__ : str = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0000_0000
# Compression
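                # Standard compression step: temp1 = h + S1(e) + ch(e, f, g) + k[i] + w[i]
                # and temp2 = S0(a) + maj(a, b, c); the working variables rotate with
                # new a = (temp1 + temp2) mod 2**32 and new e = (d + temp1) mod 2**32.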
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.ror(_A , 6 ) ^ self.ror(_A , 11 ) ^ self.ror(_A , 25 )
SCREAMING_SNAKE_CASE__ : int = (e & f) ^ ((~e & 0XFFFF_FFFF) & g)
SCREAMING_SNAKE_CASE__ : int = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0000_0000
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.ror(_A , 2 ) ^ self.ror(_A , 13 ) ^ self.ror(_A , 22 )
SCREAMING_SNAKE_CASE__ : Any = (a & b) ^ (a & c) ^ (b & c)
SCREAMING_SNAKE_CASE__ : Any = (sa + maj) % 0X1_0000_0000
SCREAMING_SNAKE_CASE__ : Tuple = (
g,
f,
e,
((d + tempa) % 0X1_0000_0000),
c,
b,
a,
((tempa + tempa) % 0X1_0000_0000),
)
SCREAMING_SNAKE_CASE__ : Any = [a, b, c, d, e, f, g, h]
# Modify final values
SCREAMING_SNAKE_CASE__ : int = [
((element + mutated_hash_values[index]) % 0X1_0000_0000)
for index, element in enumerate(self.hashes )
]
SCREAMING_SNAKE_CASE__ : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def A_ ( self : Union[str, Any] , a : str , a : Tuple ) ->int:
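        # 32-bit right rotation: the low `rotations` bits wrap around to the top.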
return 0XFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Any ) ->Union[str, Any]:
import hashlib
SCREAMING_SNAKE_CASE__ : Any = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(_A ).hash , hashlib.shaaaa(_A ).hexdigest() )
def UpperCAmelCase ( ):
'''simple docstring'''
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE__ : List[str] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
SCREAMING_SNAKE_CASE__ : int = f.read()
else:
SCREAMING_SNAKE_CASE__ : int = bytes(_lowerCamelCase , "utf-8" )
print(SHAaaa(_lowerCamelCase ).hash )
if __name__ == "__main__":
main()
| 716 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
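        # Replicate the parameters across all devices and shard the per-device
        # batches so the pipeline can run data-parallel inference under pmap.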
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 0 |
def UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # arithmetic series sum: S_n = (n / 2) * (2 * a + (n - 1) * d)
return total
def UpperCAmelCase ( ):
'''simple docstring'''
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :List[Any] = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def UpperCAmelCase ( _lowerCamelCase : str ):
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : Tuple ):
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
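    # pytest uses exit status 5 to signal that no tests were collected; treat
    # that as success so empty test selections do not fail CI.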
if exitstatus == 5:
SCREAMING_SNAKE_CASE__ : List[str] = 0
# Doctest custom flag to ignore output.
__lowercase :Optional[Any] = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Dict = doctest.OutputChecker
class _a ( lowercase__ ):
"""simple docstring"""
def A_ ( self : Dict , a : List[str] , a : Dict , a : int ) ->Optional[Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , a , a , a )
__lowercase :Any = CustomOutputChecker
__lowercase :Any = HfDoctestModule
__lowercase :int = HfDocTestParser
| 26 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : float | Decimal , _lowerCamelCase : float = 10**-10 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = a
while True:
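        # Newton-Raphson update: x_{n+1} = x_n - f(x_n) / f'(x_n).
        # Decimal arithmetic keeps precision and sympy's diff supplies f'(x).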
SCREAMING_SNAKE_CASE__ : Tuple = Decimal(a_ ) - (
Decimal(eval(a_ ) ) / Decimal(eval(str(diff(a_ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(a_ ) ) < precision: # noqa: S307
return float(a_ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 718 |
def UpperCAmelCase ( _lowerCamelCase : int = 1_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = -1
SCREAMING_SNAKE_CASE__ : str = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
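        # Substituting c = n - a - b into a**2 + b**2 = c**2 gives
        # b = n * (n - 2 * a) / (2 * (n - a)), which the line below computes
        # with integer division.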
SCREAMING_SNAKE_CASE__ : Tuple = (n * n - 2 * a * n) // (2 * n - 2 * a)
SCREAMING_SNAKE_CASE__ : Dict = n - a - b
if c * c == (a * a + b * b):
SCREAMING_SNAKE_CASE__ : str = a * b * c
if candidate >= product:
SCREAMING_SNAKE_CASE__ : List[str] = candidate
return product
if __name__ == "__main__":
print(f"{solution() = }")
| 26 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowercase :Optional[Any] = logging.get_logger(__name__)
class _a ( _UpperCAmelCase ):
"""simple docstring"""
snake_case_ = ['pixel_values']
def __init__( self : Optional[int] , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_size_dict(__UpperCamelCase , param_name="crop_size" )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Dict = size
SCREAMING_SNAKE_CASE__ : Optional[Any] = resample
SCREAMING_SNAKE_CASE__ : List[Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size
SCREAMING_SNAKE_CASE__ : str = do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor
SCREAMING_SNAKE_CASE__ : str = do_normalize
SCREAMING_SNAKE_CASE__ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : List[str] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : int = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Any = get_resize_output_image_size(__UpperCamelCase , size=size["shortest_edge"] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[Any] ) ->np.ndarray:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A_ ( self : Optional[int] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ) ->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def A_ ( self : Union[str, Any] , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Optional[int] , ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Any = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : int = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(__UpperCamelCase , param_name="crop_size" )
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Any = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : List[str] = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[str] = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Dict = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : int = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE__ : int = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
def A_ ( self : Optional[int] , a : List[Any] , a : List[Tuple] = None ) ->int:
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ : str = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : int = []
for idx in range(len(__UpperCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 719 |
from __future__ import annotations
def UpperCAmelCase ( _lowerCamelCase : list , _lowerCamelCase : int | None = None , _lowerCamelCase : int | None = None ):
'''simple docstring'''
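    # Slowsort ("multiply and surrender"): recursively sort both halves, move the
    # maximum of the range to the end, then sort everything before it again.
    # It is deliberately inefficient and useful only for demonstration.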
if start is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
if end is None:
SCREAMING_SNAKE_CASE__ : Any = len(_lowerCamelCase ) - 1
if start >= end:
return
SCREAMING_SNAKE_CASE__ : List[str] = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :List[str] = {
"""google/pix2struct-textcaps-base""": (
"""https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( _a ):
"""simple docstring"""
snake_case_ = """pix2struct_text_model"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Union[str, Any] , a : Optional[Any]=5_02_44 , a : List[Any]=7_68 , a : Optional[int]=64 , a : Union[str, Any]=20_48 , a : int=12 , a : Dict=12 , a : Dict=32 , a : List[Any]=1_28 , a : Union[str, Any]=0.1 , a : Tuple=1E-6 , a : Optional[Any]=1.0 , a : Optional[Any]="gelu_new" , a : Tuple=0 , a : List[str]=False , a : Optional[Any]=0 , a : str=1 , a : Dict=False , a : Dict=True , **a : Optional[Any] , ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = d_kv
SCREAMING_SNAKE_CASE__ : Dict = d_ff
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_layers
SCREAMING_SNAKE_CASE__ : Dict = num_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : List[Any] = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ : List[str] = dropout_rate
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_factor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_cache
SCREAMING_SNAKE_CASE__ : Any = eos_token_id
SCREAMING_SNAKE_CASE__ : Tuple = decoder_start_token_id
# for backwards compatibility
SCREAMING_SNAKE_CASE__ : str = dense_act_fn
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , tie_word_embeddings=snake_case_ , is_decoder=snake_case_ , **snake_case_ , )
@classmethod
def A_ ( cls : Optional[int] , a : Dict , **a : List[Any] ) ->"PretrainedConfig":
cls._set_token_in_kwargs(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ : Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case_ , **snake_case_ )
class UpperCamelCase__ ( _a ):
"""simple docstring"""
snake_case_ = """pix2struct_vision_model"""
def __init__( self : Optional[int] , a : Optional[int]=7_68 , a : List[Any]=7_68 , a : Any=20_48 , a : List[Any]=64 , a : Any=12 , a : Any=12 , a : Dict="gelu_new" , a : Optional[Any]=1E-6 , a : Optional[Any]=0.0 , a : Dict=0.0 , a : Tuple=1E-10 , a : Any=1.0 , a : str=40_96 , a : Tuple=32 , a : int=1_28 , **a : Any , ) ->Union[str, Any]:
super().__init__(**snake_case_ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = patch_embed_hidden_size
SCREAMING_SNAKE_CASE__ : str = d_ff
SCREAMING_SNAKE_CASE__ : Any = dropout_rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_factor
SCREAMING_SNAKE_CASE__ : Tuple = attention_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[int] = dense_act_fn
SCREAMING_SNAKE_CASE__ : Dict = seq_len
SCREAMING_SNAKE_CASE__ : int = relative_attention_num_buckets
SCREAMING_SNAKE_CASE__ : Dict = relative_attention_max_distance
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_kv
@classmethod
def A_ ( cls : str , a : Union[str, Any] , **a : List[Any] ) ->"PretrainedConfig":
cls._set_token_in_kwargs(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : str = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
SCREAMING_SNAKE_CASE__ : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case_ , **snake_case_ )
class UpperCamelCase__ ( _a ):
"""simple docstring"""
snake_case_ = """pix2struct"""
snake_case_ = True
def __init__( self : List[Any] , a : str=None , a : Optional[Any]=None , a : Any=1.0 , a : List[str]=0.02 , a : Union[str, Any]=False , a : int=False , a : List[str]=True , **a : Optional[int] , ) ->List[str]:
super().__init__(tie_word_embeddings=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ )
if text_config is None:
SCREAMING_SNAKE_CASE__ : Dict = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE__ : Dict = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PixaStructTextConfig(**snake_case_ )
SCREAMING_SNAKE_CASE__ : str = PixaStructVisionConfig(**snake_case_ )
SCREAMING_SNAKE_CASE__ : Tuple = self.text_config.decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.text_config.pad_token_id
SCREAMING_SNAKE_CASE__ : Dict = self.text_config.eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_factor
SCREAMING_SNAKE_CASE__ : str = initializer_range
SCREAMING_SNAKE_CASE__ : str = self.initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = is_vqa
@classmethod
def A_ ( cls : Dict , a : Optional[Any] , a : int , **a : Union[str, Any] ) ->List[str]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ )
def A_ ( self : Optional[Any] ) ->Any:
SCREAMING_SNAKE_CASE__ : Optional[int] = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : str = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ : Any = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.__class__.model_type
return output
| 720 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
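# A fraction is "digit cancelling" when naively striking out the digit shared by
# numerator and denominator leaves an equal fraction, e.g. 49/98 == 4/8.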
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = 11
SCREAMING_SNAKE_CASE__ : Any = int("1" + "0" * digit_len )
for num in range(_lowerCamelCase , _lowerCamelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowerCamelCase , _lowerCamelCase ):
solutions.append(f"""{num}/{den}""" )
den += 1
num += 1
SCREAMING_SNAKE_CASE__ : str = 10
return solutions
def UpperCAmelCase ( _lowerCamelCase : int = 2 ):
'''simple docstring'''
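    # Project Euler 33: multiply the reciprocals (den / num) of the non-trivial
    # digit-cancelling fractions; the integer value of that product is the
    # denominator of the fractions' product written in lowest terms.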
SCREAMING_SNAKE_CASE__ : Optional[int] = 1.0
for fraction in fraction_list(_lowerCamelCase ):
SCREAMING_SNAKE_CASE__ : Any = Fraction(_lowerCamelCase )
result *= frac.denominator / frac.numerator
return int(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 26 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase :Optional[Any] = logging.get_logger(__name__)
__lowercase :Union[str, Any] = {
'''tanreinama/GPTSAN-2.8B-spout_is_uniform''': (
'''https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'''
),
}
class _a ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
snake_case_ = "gptsan-japanese"
snake_case_ = [
"past_key_values",
]
snake_case_ = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[str] , a : int=3_60_00 , a : Dict=12_80 , a : Optional[Any]=10_24 , a : Dict=81_92 , a : List[Any]=40_96 , a : List[str]=1_28 , a : Dict=10 , a : Tuple=0 , a : Any=16 , a : str=16 , a : Optional[int]=1_28 , a : Tuple=0.0 , a : Optional[Any]=1E-5 , a : str=False , a : str=0.0 , a : Optional[int]="float32" , a : Dict=False , a : Dict=False , a : Optional[Any]=False , a : Tuple=0.002 , a : List[str]=False , a : Union[str, Any]=True , a : Dict=3_59_98 , a : Dict=3_59_95 , a : Any=3_59_99 , **a : Optional[int] , ) ->int:
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = d_model
SCREAMING_SNAKE_CASE__ : Any = d_ff
SCREAMING_SNAKE_CASE__ : List[str] = d_ext
SCREAMING_SNAKE_CASE__ : str = d_spout
SCREAMING_SNAKE_CASE__ : str = num_switch_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_ext_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_switch_layers + num_ext_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_heads
SCREAMING_SNAKE_CASE__ : Any = num_experts
SCREAMING_SNAKE_CASE__ : List[str] = expert_capacity
SCREAMING_SNAKE_CASE__ : Dict = dropout_rate
SCREAMING_SNAKE_CASE__ : Optional[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : List[Any] = router_bias
SCREAMING_SNAKE_CASE__ : Tuple = router_jitter_noise
SCREAMING_SNAKE_CASE__ : str = router_dtype
SCREAMING_SNAKE_CASE__ : Tuple = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE__ : str = output_hidden_states
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_attentions
SCREAMING_SNAKE_CASE__ : str = initializer_factor
SCREAMING_SNAKE_CASE__ : Tuple = output_router_logits
SCREAMING_SNAKE_CASE__ : List[Any] = use_cache
super().__init__(
separator_token_id=_lowercase , pad_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
| 721 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Dict ) ->str:
SCREAMING_SNAKE_CASE__ : Any = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : int = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def A_ ( self : int ) ->Union[str, Any]:
pass
@slow
@require_torch
def A_ ( self : int ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : str = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : List[Any] = audio_classifier(a , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
] , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(a ) , [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def A_ ( self : Optional[int] ) ->Union[str, Any]:
pass
| 26 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__lowercase :str = logging.get_logger(__name__)
__lowercase :List[Any] = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _a ( _lowercase ):
"""simple docstring"""
snake_case_ = '''gptj'''
snake_case_ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : str , a : Tuple=5_04_00 , a : Union[str, Any]=20_48 , a : Tuple=40_96 , a : Any=28 , a : List[str]=16 , a : Tuple=64 , a : List[str]=None , a : Optional[int]="gelu_new" , a : Tuple=0.0 , a : Union[str, Any]=0.0 , a : Tuple=0.0 , a : Optional[int]=1E-5 , a : str=0.02 , a : Optional[Any]=True , a : Optional[Any]=5_02_56 , a : List[Any]=5_02_56 , a : Any=False , **a : int , ) ->int:
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : str = n_positions
SCREAMING_SNAKE_CASE__ : Optional[int] = n_embd
SCREAMING_SNAKE_CASE__ : List[Any] = n_layer
SCREAMING_SNAKE_CASE__ : int = n_head
SCREAMING_SNAKE_CASE__ : Dict = n_inner
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rotary_dim
SCREAMING_SNAKE_CASE__ : Dict = activation_function
SCREAMING_SNAKE_CASE__ : str = resid_pdrop
SCREAMING_SNAKE_CASE__ : Union[str, Any] = embd_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : str = use_cache
SCREAMING_SNAKE_CASE__ : Tuple = bos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = eos_token_id
super().__init__(
bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ )
class _a ( _lowercase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , a : PretrainedConfig , a : str = "default" , a : List[PatchingSpec] = None , a : bool = False , ) ->Union[str, Any]:
super().__init__(A_ , task=A_ , patching_specs=A_ , use_past=A_ )
if not getattr(self._config , "pad_token_id" , A_ ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE__ : List[str] = 0
@property
def A_ ( self : Tuple ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[int] = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(A_ , direction="inputs" )
SCREAMING_SNAKE_CASE__ : Tuple = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def A_ ( self : str ) ->int:
return self._config.n_layer
@property
def A_ ( self : int ) ->int:
return self._config.n_head
def A_ ( self : Optional[Any] , a : PreTrainedTokenizer , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : int = super(A_ , self ).generate_dummy_inputs(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
        # We need to order the inputs in the way they appear in the forward()
SCREAMING_SNAKE_CASE__ : Optional[int] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : str = seqlen + 2
SCREAMING_SNAKE_CASE__ : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = [
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE__ : List[str] = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
return ordered_inputs
@property
def A_ ( self : Union[str, Any] ) ->int:
return 13
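# Hedged usage sketch (illustration only). The public transformers names
# GPTJConfig / GPTJOnnxConfig are used here because both local classes above
# share the placeholder name `_a`; the tokenizer checkpoint is an assumption
# (GPT-J reuses the GPT-2 vocabulary).
def _example_gptj_onnx_dummy_inputs():
    from transformers import AutoTokenizer, TensorType
    from transformers.models.gptj.configuration_gptj import GPTJConfig, GPTJOnnxConfig

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    onnx_config = GPTJOnnxConfig(GPTJConfig(), task="default")
    # Returns an OrderedDict with input_ids first, then attention_mask,
    # matching the input-ordering logic in generate_dummy_inputs above.
    return onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)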
| 700 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowercase :List[str] = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
__lowercase :str = get_tests_dir("fixtures/vocab.json")
__lowercase :Optional[int] = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def A_ ( self : Optional[Any] ) ->int:
SCREAMING_SNAKE_CASE__ : Dict = 0
def A_ ( self : Any ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : str = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : int ) ->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(a , os.path.join(a , a ) )
copyfile(a , os.path.join(a , "vocab.json" ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[Any] ) ->Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Any = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : List[str] ) ->Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , "r" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.load(a )
config_dict.pop("processor_class" )
with open(os.path.join(a , a ) , "w" ) as f:
f.write(json.dumps(a ) )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Union[str, Any] ) ->str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(a )
# copy relevant files
copyfile(a , os.path.join(a , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(a , a ) , "w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def A_ ( self : Optional[Any] ) ->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
SCREAMING_SNAKE_CASE__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE__ : int = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a , use_fast=a )
SCREAMING_SNAKE_CASE__ : List[Any] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def A_ ( self : Tuple ) ->List[Any]:
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoProcessor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__ : List[str] = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(a )
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Union[str, Any] ) ->int:
        class _a ( WavaVecaFeatureExtractor ):
"""simple docstring"""
snake_case_ = False
        class _a ( BertTokenizer ):
"""simple docstring"""
snake_case_ = False
        class _a ( ProcessorMixin ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("custom" , a )
AutoFeatureExtractor.register(a , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoProcessor.register(a , a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE__ : Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE__ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=a )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def A_ ( self : Optional[Any] ) ->Dict:
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def A_ ( self : Dict ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class _a ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = TOKEN
HfFolder.save_token(a )
@classmethod
def A_ ( cls : List[str] ) ->Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def A_ ( self : Dict ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor" ) , push_to_hub=a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(a , "test-processor-org" ) , push_to_hub=a , use_auth_token=self._token , organization="valid_org" , )
SCREAMING_SNAKE_CASE__ : Dict = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def A_ ( self : Any ) ->int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE__ : Any = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(a , "vocab.txt" )
with open(a , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = CustomTokenizer(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CustomProcessor(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token )
SCREAMING_SNAKE_CASE__ : str = Repository(a , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(a , "tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE__ : str = json.load(a )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(a , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(a , "custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE__ : List[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
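# Hedged sketch (illustration only) of the registration flow the tests above
# exercise: a custom config is bound to custom feature-extractor, tokenizer,
# and processor classes so the auto classes can resolve it.
def _example_register_custom_processor():
    AutoConfig.register("custom", CustomConfig)
    AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
    AutoProcessor.register(CustomConfig, CustomProcessor)
    # After registration, AutoProcessor.from_pretrained on a checkpoint whose
    # config has model_type == "custom" resolves to CustomProcessor.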
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase :Optional[Any] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[Any] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Tuple = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
__lowercase :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
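# Note (illustration only): with the _LazyModule above, submodules are imported
# on first attribute access rather than at package import time, e.g.
#
#     from transformers.models.tapas import TapasConfig  # triggers the lazy import
#
# so importing the package stays cheap even though the TAPAS models pull in
# torch or TensorFlow.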
| 701 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
return self.image_processor
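# Hedged usage sketch (illustration only; the checkpoint name is an assumption).
def _example_clip_processor_usage():
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.new("RGB", (224, 224))
    # Tokenizes the text and preprocesses the image in one call; the returned
    # BatchEncoding carries input_ids, attention_mask, and pixel_values.
    return processor(text=["a photo of a cat"], images=image, return_tensors="pt")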
| 26 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
SCREAMING_SNAKE_CASE__ : Any = DatasetInfosDict.from_directory(_lowerCamelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : DatasetInfo ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = str(_lowerCamelCase )
dataset_info.write_to_directory(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = DatasetInfo.from_directory(_lowerCamelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_lowerCamelCase , "dataset_info.json" ) )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
SCREAMING_SNAKE_CASE__ : Any = dataset_info._to_yaml_dict()
assert sorted(_lowerCamelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
SCREAMING_SNAKE_CASE__ : str = yaml.safe_dump(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = yaml.safe_load(_lowerCamelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = DatasetInfo()
SCREAMING_SNAKE_CASE__ : str = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : DatasetInfosDict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = str(_lowerCamelCase )
dataset_infos_dict.write_to_directory(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = DatasetInfosDict.from_directory(_lowerCamelCase )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
SCREAMING_SNAKE_CASE__ : str = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
SCREAMING_SNAKE_CASE__ : int = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_lowerCamelCase , "README.md" ) )
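# Hedged sketch (illustration only) of the basic round trip the tests above
# cover: dump a DatasetInfo to a directory and read it back.
def _example_dataset_info_roundtrip(tmp_dir):
    info = DatasetInfo(description="demo", features=Features({"a": Value("int32")}))
    info.write_to_directory(tmp_dir)  # writes dataset_info.json into tmp_dir
    # Reloading recovers the dumped fields, so the objects compare equal.
    return DatasetInfo.from_directory(tmp_dir)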
| 702 |
import sys
from collections import defaultdict
class _a :
"""simple docstring"""
def __init__( self : Any ) ->Dict:
SCREAMING_SNAKE_CASE__ : Tuple = []
def A_ ( self : int , a : List[str] ) ->Dict:
return self.node_position[vertex]
def A_ ( self : Optional[Any] , a : Any , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : str = pos
def A_ ( self : List[Any] , a : List[str] , a : Dict , a : Dict , a : List[Any] ) ->Optional[int]:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
SCREAMING_SNAKE_CASE__ : Dict = 2 * start + 1
else:
SCREAMING_SNAKE_CASE__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = heap[smallest_child], positions[smallest_child]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = (
heap[start],
positions[start],
)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = temp, tempa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a )
self.top_to_bottom(a , a , a , a )
def A_ ( self : Union[str, Any] , a : Tuple , a : Tuple , a : Union[str, Any] , a : List[Any] ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : List[Any] = position[index]
while index != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
SCREAMING_SNAKE_CASE__ : List[Any] = heap[parent]
SCREAMING_SNAKE_CASE__ : str = position[parent]
self.set_position(position[parent] , a )
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : Optional[Any] = temp
self.set_position(a , a )
break
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
else:
SCREAMING_SNAKE_CASE__ : int = val
SCREAMING_SNAKE_CASE__ : List[str] = temp
self.set_position(a , 0 )
def A_ ( self : Union[str, Any] , a : int , a : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] = len(a ) // 2 - 1
for i in range(a , -1 , -1 ):
self.top_to_bottom(a , a , len(a ) , a )
def A_ ( self : Dict , a : List[Any] , a : Dict ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Any = positions[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = sys.maxsize
self.top_to_bottom(a , 0 , len(a ) , a )
return temp
def prisms_algorithm ( _lowerCamelCase : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Heap()
SCREAMING_SNAKE_CASE__ : Any = [0] * len(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
    # Minimum distance of each vertex from the partial tree
    # formed so far in the graph
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [] # Heap of Distance of vertices from their neighboring vertex
SCREAMING_SNAKE_CASE__ : str = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : List[str] = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
SCREAMING_SNAKE_CASE__ : Any = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase :Union[str, Any] = int(input("Enter number of edges: ").strip())
__lowercase :Dict = defaultdict(list)
for _ in range(edges_number):
__lowercase :Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
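# Worked example (illustration only): for the triangle graph with edges
#     0 --1-- 1,  1 --2-- 2,  0 --3-- 2
# entered as "0 1 1", "1 2 2", "0 2 3", the algorithm as originally written
# keeps the two cheapest edges and prints the MST [(0, 1), (1, 2)],
# total weight 1 + 2 = 3.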
| 26 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( ExplicitEnum ):
"""simple docstring"""
snake_case_ = "char"
snake_case_ = "bpe"
snake_case_ = "wp"
__lowercase :Any = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( ProcessorMixin ):
"""simple docstring"""
snake_case_ = ["image_processor", "char_tokenizer"]
snake_case_ = "ViTImageProcessor"
snake_case_ = "MgpstrTokenizer"
def __init__( self : Union[str, Any] , a : Any=None , a : Dict=None , **a : Any ) ->Any:
SCREAMING_SNAKE_CASE__ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __A , )
SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE__ : List[str] = AutoTokenizer.from_pretrained("gpt2" )
SCREAMING_SNAKE_CASE__ : Dict = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__A , __A )
def __call__( self : List[str] , a : str=None , a : Optional[Any]=None , a : Dict=None , **a : Optional[Any] ) ->List[Any]:
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.char_tokenizer(__A , return_tensors=__A , **__A )
if text is None:
return inputs
elif images is None:
return encodings
else:
SCREAMING_SNAKE_CASE__ : Any = encodings["input_ids"]
return inputs
def A_ ( self : Optional[Any] , a : Optional[int] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[str] = sequences
SCREAMING_SNAKE_CASE__ : Optional[Any] = char_preds.size(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = self._decode_helper(__A , "char" )
SCREAMING_SNAKE_CASE__ : Tuple = self._decode_helper(__A , "bpe" )
SCREAMING_SNAKE_CASE__ : int = self._decode_helper(__A , "wp" )
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(__A ):
SCREAMING_SNAKE_CASE__ : List[str] = [char_scores[i], bpe_scores[i], wp_scores[i]]
SCREAMING_SNAKE_CASE__ : str = [char_strs[i], bpe_strs[i], wp_strs[i]]
SCREAMING_SNAKE_CASE__ : str = scores.index(max(__A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
SCREAMING_SNAKE_CASE__ : Any = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = final_strs
SCREAMING_SNAKE_CASE__ : Tuple = final_scores
SCREAMING_SNAKE_CASE__ : Optional[Any] = char_strs
SCREAMING_SNAKE_CASE__ : Tuple = bpe_strs
SCREAMING_SNAKE_CASE__ : Dict = wp_strs
return out
def A_ ( self : str , a : List[Any] , a : Union[str, Any] ) ->Optional[Any]:
if format == DecodeType.CHARACTER:
SCREAMING_SNAKE_CASE__ : int = self.char_decode
SCREAMING_SNAKE_CASE__ : List[str] = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = "[s]"
elif format == DecodeType.BPE:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.bpe_decode
SCREAMING_SNAKE_CASE__ : List[str] = 2
SCREAMING_SNAKE_CASE__ : Dict = "#"
elif format == DecodeType.WORDPIECE:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.wp_decode
SCREAMING_SNAKE_CASE__ : Tuple = 1_02
SCREAMING_SNAKE_CASE__ : Optional[Any] = "[SEP]"
else:
raise ValueError(f"""Format {format} is not supported.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [], []
SCREAMING_SNAKE_CASE__ : Tuple = pred_logits.size(0 )
SCREAMING_SNAKE_CASE__ : int = pred_logits.size(1 )
SCREAMING_SNAKE_CASE__ : Any = pred_logits.topk(1 , dim=-1 , largest=__A , sorted=__A )
SCREAMING_SNAKE_CASE__ : Tuple = preds_index.view(-1 , __A )[:, 1:]
SCREAMING_SNAKE_CASE__ : List[Any] = decoder(__A )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.nn.functional.softmax(__A , dim=2 ).max(dim=2 )
SCREAMING_SNAKE_CASE__ : str = preds_max_prob[:, 1:]
for index in range(__A ):
SCREAMING_SNAKE_CASE__ : int = preds_str[index].find(__A )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = preds_str[index][:pred_eos]
SCREAMING_SNAKE_CASE__ : Any = preds_index[index].cpu().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pred_index.index(__A ) if eos_token in pred_index else -1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = preds_max_prob[index][: pred_eos_index + 1]
SCREAMING_SNAKE_CASE__ : List[str] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__A )
conf_scores.append(__A )
return dec_strs, conf_scores
def A_ ( self : Union[str, Any] , a : Tuple ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__A )]
return decode_strs
def A_ ( self : List[Any] , a : Any ) ->Union[str, Any]:
return self.bpe_tokenizer.batch_decode(__A )
def A_ ( self : Optional[int] , a : List[str] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__A )]
return decode_strs
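# Hedged usage sketch (illustration only; the checkpoint name is an assumption).
def _example_mgpstr_usage():
    from PIL import Image
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
    pixel_values = processor(images=Image.new("RGB", (128, 32)), return_tensors="pt").pixel_values
    outputs = model(pixel_values)
    # batch_decode picks, per sample, the best string among the character,
    # BPE, and WordPiece decoding heads, as implemented above.
    return processor.batch_decode(outputs.logits)["generated_text"]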
| 703 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__lowercase :List[Any] = logging.get_logger(__name__)
__lowercase :Optional[int] = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class _a ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = "longformer"
def __init__( self : List[str] , a : Union[List[int], int] = 5_12 , a : int = 2 , a : int = 1 , a : int = 0 , a : int = 2 , a : int = 3_05_22 , a : int = 7_68 , a : int = 12 , a : int = 12 , a : int = 30_72 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : int = 5_12 , a : int = 2 , a : float = 0.02 , a : float = 1E-12 , a : bool = False , **a : Dict , ) ->Tuple:
super().__init__(pad_token_id=a , **a )
SCREAMING_SNAKE_CASE__ : int = attention_window
SCREAMING_SNAKE_CASE__ : Any = sep_token_id
SCREAMING_SNAKE_CASE__ : str = bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : str = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = onnx_export
class _a ( OnnxConfig ):
"""simple docstring"""
def __init__( self : int , a : "PretrainedConfig" , a : str = "default" , a : "List[PatchingSpec]" = None ) ->str:
super().__init__(a , a , a )
SCREAMING_SNAKE_CASE__ : Any = True
@property
def A_ ( self : int ) ->Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def A_ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
if self.task == "default":
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch"}
return outputs
@property
def A_ ( self : str ) ->float:
return 1E-4
@property
def A_ ( self : Any ) ->int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def A_ ( self : str , a : "PreTrainedTokenizerBase" , a : int = -1 , a : int = -1 , a : bool = False , a : Optional[TensorType] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Tuple = super().generate_dummy_inputs(
preprocessor=a , batch_size=a , seq_length=a , is_pair=a , framework=a )
import torch
        # for some reason, replacing this code with inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
SCREAMING_SNAKE_CASE__ : Any = torch.zeros_like(inputs["input_ids"] )
# make every second token global
SCREAMING_SNAKE_CASE__ : str = 1
return inputs
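# Hedged sketch (illustration only) of the global attention pattern the dummy
# inputs above encode: zeros everywhere, with every second token made global.
def _example_global_attention_mask(input_ids):
    import torch

    mask = torch.zeros_like(input_ids)
    mask[:, ::2] = 1  # every second token attends globally
    return mask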
| 26 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__lowercase :List[str] = logging.get_logger(__name__)
__lowercase :Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowercase :Union[str, Any] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__lowercase :List[Any] = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__lowercase :str = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class _a ( PreTrainedTokenizerFast ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = SqueezeBertTokenizer
def __init__( self : Optional[int] , a : Optional[Any]=None , a : Optional[int]=None , a : str=True , a : Any="[UNK]" , a : str="[SEP]" , a : Optional[Any]="[PAD]" , a : List[str]="[CLS]" , a : List[str]="[MASK]" , a : Optional[Any]=True , a : Optional[Any]=None , **a : Any , ) ->List[str]:
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _a ) != do_lower_case
or normalizer_state.get("strip_accents" , _a ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _a ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(_a , normalizer_state.pop("type" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_lower_case
SCREAMING_SNAKE_CASE__ : List[str] = strip_accents
SCREAMING_SNAKE_CASE__ : Dict = tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : List[str] = normalizer_class(**_a )
SCREAMING_SNAKE_CASE__ : Any = do_lower_case
def A_ ( self : int , a : Dict , a : Dict=None ) ->str:
SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A_ ( self : Any , a : List[int] , a : Optional[List[int]] = None ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_ ( self : Optional[int] , a : str , a : Optional[str] = None ) ->str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
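# Hedged usage sketch (illustration only; the checkpoint name is an assumption).
def _example_squeezebert_tokenizer():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
    # Sentence pairs are joined as [CLS] A [SEP] B [SEP], mirroring
    # build_inputs_with_special_tokens above; token_type_ids mark the segments.
    enc = tokenizer("hello world", "second sentence")
    return enc["input_ids"], enc["token_type_ids"]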
| 704 |
def solution ( _lowerCamelCase : int = 4_000_000 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [0, 1]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for j in range(len(_lowerCamelCase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
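# Worked check (illustration only): below 100 the Fibonacci terms are
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, and the even-valued ones sum to
# 2 + 8 + 34 = 44, so solution(100) == 44; with the default 4_000_000
# bound the sum is 4613732 (Project Euler problem 2).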
| 26 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowercase :Dict = "CompVis/stable-diffusion-v1-1"
__lowercase :Optional[Any] = "CompVis/stable-diffusion-v1-2"
__lowercase :List[str] = "CompVis/stable-diffusion-v1-3"
__lowercase :List[str] = "CompVis/stable-diffusion-v1-4"
class _a ( DiffusionPipeline ):
"""simple docstring"""
def __init__( self : Union[str, Any] , a : AutoencoderKL , a : CLIPTextModel , a : CLIPTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , a : StableDiffusionSafetyChecker , a : CLIPImageProcessor , a : bool = True , ) ->Union[str, Any]:
        super().__init__()
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionPipeline.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : int = StableDiffusionPipeline.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionPipeline.from_pretrained(a_ )
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionPipeline(
vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=a_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
return {k: getattr(self , a_ ) for k in self.config.keys() if not k.startswith("_" )}
def A_ ( self : str , a : Optional[Union[str, int]] = "auto" ) ->List[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE__ : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a_ )
def A_ ( self : Union[str, Any] ) ->str:
self.enable_attention_slicing(a_ )
@torch.no_grad()
def A_ ( self : Union[str, Any] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : List[str] , ) ->List[Any]:
return self.pipea(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
@torch.no_grad()
def A_ ( self : List[Any] , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Any , ) ->int:
return self.pipea(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
@torch.no_grad()
def A_ ( self : Dict , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Optional[int] , ) ->str:
return self.pipea(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
@torch.no_grad()
def A_ ( self : Any , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : Union[str, Any] , ) ->Tuple:
return self.pipea(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
@torch.no_grad()
def A_ ( self : str , a : Union[str, List[str]] , a : int = 5_12 , a : int = 5_12 , a : int = 50 , a : float = 7.5 , a : Optional[Union[str, List[str]]] = None , a : Optional[int] = 1 , a : float = 0.0 , a : Optional[torch.Generator] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , a : int = 1 , **a : int , ) ->str:
SCREAMING_SNAKE_CASE__ : int = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(a_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE__ : Dict = self.textaimg_sda_a(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE__ : str = self.textaimg_sda_a(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE__ : Optional[int] = self.textaimg_sda_a(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE__ : Optional[int] = self.textaimg_sda_a(
prompt=a_ , height=a_ , width=a_ , num_inference_steps=a_ , guidance_scale=a_ , negative_prompt=a_ , num_images_per_prompt=a_ , eta=a_ , generator=a_ , latents=a_ , output_type=a_ , return_dict=a_ , callback=a_ , callback_steps=a_ , **a_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
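# Hedged usage sketch (illustration only): the `custom_pipeline` name below is
# an assumption about how this community comparison pipeline is published.
#
#     from diffusers import DiffusionPipeline
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4",
#         custom_pipeline="stable_diffusion_comparison",
#     )
#     result = pipe(prompt="an astronaut riding a horse")
#     # one image each from checkpoints v1.1 through v1.4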
| 705 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , a : Any , a : bool = True , a : Dict[str, int] = None , a : int = 32 , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : bool = True , a : Optional[Union[float, List[float]]] = [0.4814_5466, 0.457_8275, 0.4082_1073] , a : Optional[Union[float, List[float]]] = [0.2686_2954, 0.2613_0258, 0.2757_7711] , a : bool = True , a : Any=7 , a : str=30 , a : Dict=4_00 , a : Optional[int]=3 , ) ->int:
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_88}
SCREAMING_SNAKE_CASE__ : List[Any] = size_divisor
SCREAMING_SNAKE_CASE__ : List[Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean
SCREAMING_SNAKE_CASE__ : Dict = image_std
SCREAMING_SNAKE_CASE__ : List[str] = do_pad
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max_resolution
def A_ ( self : List[str] ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def A_ ( self : int , a : Optional[int] , a : Union[str, Any]=False ) ->Optional[Any]:
if not batched:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE__ : Dict = image_inputs[0]
if isinstance(a , Image.Image ):
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = image.size
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE__ : Any = size / min(a , a )
if h < w:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = size, scale * w
else:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = scale * h, size
SCREAMING_SNAKE_CASE__ : List[Any] = int((13_33 / 8_00) * size )
if max(a , a ) > max_size:
SCREAMING_SNAKE_CASE__ : List[Any] = max_size / max(a , a )
SCREAMING_SNAKE_CASE__ : int = newh * scale
SCREAMING_SNAKE_CASE__ : Optional[int] = neww * scale
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE__ : List[Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[0] )[0]
SCREAMING_SNAKE_CASE__ : Tuple = max(a , key=lambda a : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BridgeTowerImageProcessor if is_vision_available() else None
def A_ ( self : List[Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Any = BridgeTowerImageProcessingTester(self )
@property
def A_ ( self : Optional[int] ) ->Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) ->str:
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "size_divisor" ) )
def A_ ( self : List[Any] ) ->List[Any]:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self : Optional[int] ) ->Any:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch ( self : str ) ->Optional[int]:
# Initialize image processor
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Tuple = self.image_processor_tester.get_expected_values(a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(a , return_tensors="pt" ).pixel_values
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor_tester.get_expected_values(a , batched=a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 26 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = StableDiffusionXLImgaImgPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
snake_case_ = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
snake_case_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components ( self : Optional[Any] ) ->Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = EulerDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=32 , )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextModel(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : int = CLIPTextModelWithProjection(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs ( self : Any , a : List[Any] , a : str=0 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5
if str(__lowerCAmelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE__ : int = torch.manual_seed(__lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
def A_ ( self : str ) ->str:
SCREAMING_SNAKE_CASE__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_inputs(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = sd_pipe(**__lowerCAmelCase ).images
SCREAMING_SNAKE_CASE__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Dict = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_attention_slicing_forward_pass ( self : Tuple ) ->Tuple:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
    def test_inference_batch_single_identical ( self : Tuple ) ->Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A_ ( self : Optional[Any] ) ->Any:
pass
def A_ ( self : Tuple ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionXLImgaImgPipeline(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe.to(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
# forward without prompt embeds
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_inputs(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE__ : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE__ : str = 3 * [inputs["prompt"]]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe(**__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = 3 * ["this is a negative prompt"]
SCREAMING_SNAKE_CASE__ : Dict = 3 * [inputs.pop("prompt" )]
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.encode_prompt(__lowerCAmelCase , negative_prompt=__lowerCAmelCase )
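        # encode_prompt returns four tensors in this order (inferred from the call
        # below, not stated in the source): prompt_embeds, negative_prompt_embeds,
        # pooled_prompt_embeds, negative_pooled_prompt_embeds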
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(
**__lowerCAmelCase , prompt_embeds=__lowerCAmelCase , negative_prompt_embeds=__lowerCAmelCase , pooled_prompt_embeds=__lowerCAmelCase , negative_pooled_prompt_embeds=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
    def tearDown ( self : Optional[int] ) ->Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs ( self : List[str] , a : Optional[int] , a : Any="cpu" , a : int=torch.floataa , a : List[Any]=0 ) ->Dict:
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.RandomState(__lowerCAmelCase ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE__ : Dict = torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def A_ ( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_inputs(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**__lowerCAmelCase ).images
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ : int = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 706 |
def miller_rabin ( n : int , allow_probable : bool = False ):
    '''simple docstring'''
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime." )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
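# Worked example (added for clarity, not from the source): for n = 221 the loop above
# factors n - 1 = 220 = 55 * 2**2, i.e. d = 55 and s = 2, before the witness loop runs.
# Quick sanity checks of the public behaviour:
#   miller_rabin(97)  -> True  (97 is prime)
#   miller_rabin(100) -> False (even, rejected early)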
def test_miller_rabin ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 26 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :Tuple = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
__lowercase :str = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping ( key : Optional[int] , file : Optional[int] ):
'''simple docstring'''
    layer_rename_map = {
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*" , file )[1] )
layer_number -= 3
return f"""h.{layer_number}.""" + key
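# Illustrative mapping (assumed checkpoint file naming, not from the source): with
# key = "self_attention.dense.weight" and file = "layer_04-model_00-model_states.pt",
# the regex extracts 4, the offset makes it 1, and the result is
# "h.1.self_attention.dense.weight".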
def get_dtype_size ( dtype : Any ):
    '''simple docstring'''
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$" , str(dtype ) )
    if bit_search is None:
        raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
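# Quick check (added for clarity): get_dtype_size(torch.float16) == 2 and
# get_dtype_size(torch.bool) == 0.125, i.e. one eighth of a byte per element.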
def convert_bloom_checkpoint_to_pytorch ( bloom_checkpoint_path : Dict , bloom_config_file : Optional[Any] , pytorch_dump_folder_path : Optional[int] , shard_model : Union[str, Any] , pretraining_tp : Optional[int] ):
'''simple docstring'''
if bloom_config_file == "":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BloomConfig()
else:
SCREAMING_SNAKE_CASE__ : int = BloomConfig.from_json_file(UpperCAmelCase__ )
if shard_model:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.listdir(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sorted(filter(lambda _lowerCamelCase : s.startswith("layer" ) and "model_00" in s , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""weight_map""": {}, """metadata""": {}}
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : str = BloomConfig()
for j, file in enumerate(UpperCAmelCase__ ):
print("Processing file: {}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE__ : Any = None
for i in range(UpperCAmelCase__ ):
# load all TP files
SCREAMING_SNAKE_CASE__ : Any = file.replace("model_00" , f"""model_0{i}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location="cpu" )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE__ : int = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE__ : Any = temp.pop(UpperCAmelCase__ )
if tensors is None:
SCREAMING_SNAKE_CASE__ : str = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE__ : Any = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ )
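                        # e.g. "self_attention.dense.weight" shards concatenate along
                        # dim 1 (row-parallel input dimension), while column-parallel
                        # weights such as "mlp.dense_h_to_4h.weight" concatenate along
                        # dim 0 (assumed shard layout, added for illustration)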
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE__ : Dict = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase__ , os.path.join(
UpperCAmelCase__ , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
SCREAMING_SNAKE_CASE__ : Optional[int] = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase__ ) ).zfill(5 ) )
SCREAMING_SNAKE_CASE__ : Dict = BloomConfig()
SCREAMING_SNAKE_CASE__ : int = pytorch_dump_folder_path + """/""" + CONFIG_NAME
SCREAMING_SNAKE_CASE__ : List[Any] = total_size
with open(UpperCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase__ , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + """\n"""
f.write(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = BloomModel(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ : int = os.listdir(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(filter(lambda _lowerCamelCase : s.startswith("layer" ) and "model_00" in s , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
for i, file in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple = None
for i in range(UpperCAmelCase__ ):
# load all TP files
SCREAMING_SNAKE_CASE__ : List[str] = file.replace("model_00" , f"""model_0{i}""" )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , map_location="cpu" )
# Rename keys in the transformers names
SCREAMING_SNAKE_CASE__ : int = list(temp.keys() )
for key in keys:
SCREAMING_SNAKE_CASE__ : int = temp.pop(UpperCAmelCase__ )
if tensors is None:
SCREAMING_SNAKE_CASE__ : Dict = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
SCREAMING_SNAKE_CASE__ : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                    # We concatenate these weights across TP ranks
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase__ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
SCREAMING_SNAKE_CASE__ : int = tensors[key] / pretraining_tp
SCREAMING_SNAKE_CASE__ : Optional[int] = model.load_state_dict(UpperCAmelCase__ , strict=UpperCAmelCase__ )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set(other_keys.missing_keys )
else:
SCREAMING_SNAKE_CASE__ : str = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
SCREAMING_SNAKE_CASE__ : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCAmelCase__ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(UpperCAmelCase__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
__lowercase :List[str] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 707 |
import numpy
class TwoHiddenLayerNeuralNetwork :
"""simple docstring"""
def __init__( self : Optional[int] , a : numpy.ndarray , a : numpy.ndarray ) ->None:
SCREAMING_SNAKE_CASE__ : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE__ : int = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE__ : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE__ : Tuple = numpy.zeros(output_array.shape )
    def feedforward ( self : Union[str, Any] ) ->numpy.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE__ : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
    def back_propagation ( self : int ) ->None:
SCREAMING_SNAKE_CASE__ : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
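        # Note (added for clarity): the factor 2 * (output - predicted) * sigmoid'(...)
        # already carries the sign of the error gradient, which is why the three
        # weight updates above are added rather than subtracted.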
    def train ( self : int , a : numpy.ndarray , a : int , a : bool ) ->None:
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE__ : Dict = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE__ : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
    def predict ( self : Tuple , a : numpy.ndarray ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = input_arr
SCREAMING_SNAKE_CASE__ : Dict = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE__ : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid ( value : numpy.ndarray ):
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative ( value : numpy.ndarray ):
    '''simple docstring'''
    return (value) * (1 - (value))
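# Quick check (added for clarity): sigmoid(0) == 0.5, and because the derivative is
# expressed in terms of the sigmoid *output*, sigmoid_derivative(0.5) == 0.25.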
def example ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE__ : Any = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE__ : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__lowercase :Optional[Any] = False
__lowercase :List[Any] = True
__lowercase :List[Any] = False
if __name__ == "__main__":
__lowercase :Tuple = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
__lowercase :Tuple = parser.parse_args()
__lowercase :List[str] = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
__lowercase :Optional[int] = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
__lowercase :Union[str, Any] = '''''' if has_file(args.repo_path, "config.json") else '''unet'''
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
__lowercase :int = reader.read()
__lowercase :List[Any] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
__lowercase :List[Any] = UNetaDModel(**config)
else:
__lowercase :Dict = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
__lowercase :List[str] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__lowercase :str = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__lowercase :str = config[key]
del config[key]
__lowercase :int = [k.replace("UNetRes", "") for k in config['''down_block_types''']]
__lowercase :Tuple = [k.replace("UNetRes", "") for k in config['''up_block_types''']]
if do_only_weights:
__lowercase :List[Any] = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
__lowercase :int = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
__lowercase :List[Any] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
__lowercase :Union[str, Any] = param_value
__lowercase :str = True
if not has_changed:
__lowercase :List[Any] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 708 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info ( self : List[Any] ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute ( self : str , a : List[List[List[str]]] , a : List[List[str]] , a : int = 1 , a : int = 4 , ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a )
}
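# Minimal usage sketch (mirrors the docstring above; assumes `datasets` and `nltk`
# are installed):
#   google_bleu = datasets.load_metric("google_bleu")
#   results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
#   print(round(results["google_bleu"], 2))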
| 26 | 0 |
import baseaa
def baseaa_encode ( string : str ):
    '''simple docstring'''
    return baseaa.baaencode(string.encode("utf-8" ) )
def baseaa_decode ( encoded : bytes ):
    '''simple docstring'''
    return baseaa.baadecode(encoded ).decode("utf-8" )
if __name__ == "__main__":
__lowercase :List[str] = '''Hello World!'''
__lowercase :List[str] = baseaa_encode(test)
print(encoded)
__lowercase :str = baseaa_decode(encoded)
print(decoded)
| 709 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__lowercase :List[Any] = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check ( pkg : Optional[Any] , hint : Optional[Any]=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
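# Hypothetical call (added for illustration): dep_version_check("tqdm") looks up the
# pinned spec in `deps` and raises via require_version if the installed tqdm violates it.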
| 26 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase :Tuple = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure ( config : Optional[int] ):
'''simple docstring'''
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption ( parser : Tuple ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter : Optional[Any] ):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish ( session : List[str] , exitstatus : Optional[int] ):
    '''simple docstring'''
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
__lowercase :Tuple = doctest.register_optionflag("IGNORE_RESULT")
__lowercase :Tuple = doctest.OutputChecker
class _a ( __UpperCAmelCase ):
"""simple docstring"""
    def check_output ( self : Tuple , a : int , a : Tuple , a : Union[str, Any] ) ->Union[str, Any]:
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
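# Usage note (added for clarity): a doctest line flagged with the custom option, e.g.
#   >>> train_model()  # doctest: +IGNORE_RESULT
# is accepted regardless of its printed output.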
__lowercase :Optional[int] = CustomOutputChecker
__lowercase :str = HfDoctestModule
__lowercase :Optional[int] = HfDocTestParser
| 710 |
from __future__ import annotations
def max_sum_in_array ( array : list[int] , k : int ):
    '''simple docstring'''
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
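# Worked example (added for clarity): max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4)
# returns 24, the sum of the best length-4 window [3, 1, 0, 20].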
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowercase :List[str] = [randint(-1_000, 1_000) for i in range(100)]
__lowercase :Any = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 26 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase :Union[str, Any] = logging.get_logger(__name__)
__lowercase :Any = {'vocab_file': 'vocab.txt'}
__lowercase :str = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__lowercase :Optional[int] = {
'openbmb/cpm-ant-10b': 1_024,
}
def load_vocab ( vocab_file : Union[str, Any] ):
    '''simple docstring'''
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class _a ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self : Tuple , a : Optional[Any] , a : Union[str, Any]="<unk>" , a : List[Any]=2_00 ) ->str:
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab
SCREAMING_SNAKE_CASE__ : str = unk_token
SCREAMING_SNAKE_CASE__ : Any = max_input_chars_per_word
def A_ ( self : Any , a : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__ : List[Any] = list(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
while start < len(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any = None
while start < end:
SCREAMING_SNAKE_CASE__ : int = ''''''.join(chars[start:end] )
if substr in self.vocab:
SCREAMING_SNAKE_CASE__ : List[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] = end
return sub_tokens
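    # Illustrative run (hypothetical vocab, added for clarity): with a vocab
    # containing "fo" and "o" but not "foo", tokenize("foo") greedily matches the
    # longest in-vocab prefix first and yields ["fo", "o"].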
class _a ( UpperCamelCase_ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = False
def __init__( self : List[Any] , a : Optional[int] , a : Union[str, Any]="<d>" , a : Union[str, Any]="</d>" , a : Dict="<s>" , a : int="</s>" , a : Optional[Any]="<pad>" , a : Union[str, Any]="<unk>" , a : int="</n>" , a : List[str]="</_>" , a : Tuple="left" , **a : List[str] , ) ->int:
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=UpperCamelCase__ , eod_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , line_token=UpperCamelCase__ , space_token=UpperCamelCase__ , padding_side=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE__ : List[str] = bod_token
SCREAMING_SNAKE_CASE__ : Optional[int] = eod_token
SCREAMING_SNAKE_CASE__ : Dict = load_vocab(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any = self.encoder[space_token]
SCREAMING_SNAKE_CASE__ : Any = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def A_ ( self : Union[str, Any] ) ->Dict:
return self.encoder[self.bod_token]
@property
def A_ ( self : Tuple ) ->Optional[Any]:
return self.encoder[self.eod_token]
@property
def A_ ( self : Optional[int] ) ->List[Any]:
return self.encoder["\n"]
@property
def A_ ( self : Optional[Any] ) ->int:
return len(self.encoder )
def A_ ( self : int ) ->int:
return dict(self.encoder , **self.added_tokens_encoder )
def A_ ( self : Optional[int] , a : List[str] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : str = []
for x in jieba.cut(UpperCamelCase__ , cut_all=UpperCamelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCamelCase__ ) )
return output_tokens
def A_ ( self : List[Any] , a : Union[str, Any] , **a : Dict ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [i for i in token_ids if i >= 0]
SCREAMING_SNAKE_CASE__ : int = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCamelCase__ , **UpperCamelCase__ )
def A_ ( self : Dict , a : Any ) ->List[Any]:
return token in self.encoder
def A_ ( self : int , a : List[str] ) ->Tuple:
return "".join(UpperCamelCase__ )
def A_ ( self : Any , a : Optional[Any] ) ->Any:
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def A_ ( self : str , a : str ) ->List[Any]:
return self.decoder.get(UpperCamelCase__ , self.unk_token )
def A_ ( self : int , a : str , a : Optional[str] = None ) ->Optional[Any]:
if os.path.isdir(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
SCREAMING_SNAKE_CASE__ : int = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
SCREAMING_SNAKE_CASE__ : List[Any] = 0
if " " in self.encoder:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
SCREAMING_SNAKE_CASE__ : str = self.encoder['''\n''']
del self.encoder["\n"]
        SCREAMING_SNAKE_CASE__ : str = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def A_ ( self : Union[str, Any] , a : List[int] , a : List[int] = None ) ->str:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def A_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None , a : bool = False ) ->Union[str, Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ ))
return [1] + ([0] * len(UpperCamelCase__ ))
| 711 |
from __future__ import annotations
def find_max ( nums : list[int | float] , left : int , right : int ):
    '''simple docstring'''
    if len(nums ) == 0:
        raise ValueError("find_max() arg is an empty sequence" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("list index out of range" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
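# Worked example (added for clarity): find_max([3, 1, 4, 1, 5], 0, 4) splits the range
# at mid = 2 into [3, 1, 4] and [1, 5], recurses on each half, and returns 5.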
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 26 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__lowercase :List[str] = None
__lowercase :Optional[Any] = logging.get_logger(__name__)
__lowercase :str = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__lowercase :int = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
__lowercase :int = {
"camembert-base": 512,
}
__lowercase :List[str] = "▁"
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = CamembertTokenizer
def __init__( self : Optional[int] , a : List[str]=None , a : int=None , a : Union[str, Any]="<s>" , a : Any="</s>" , a : int="</s>" , a : Union[str, Any]="<s>" , a : Union[str, Any]="<unk>" , a : int="<pad>" , a : Optional[int]="<mask>" , a : Any=["<s>NOTUSED", "</s>NOTUSED"] , **a : int , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Dict = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE__ : List[Any] = False if not self.vocab_file else True
    def build_inputs_with_special_tokens ( self : Union[str, Any] , a : List[str] , a : str = None ) ->List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
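        # e.g. a sequence pair is rendered as: <s> A </s></s> B </s>
        # (CamemBERT follows the RoBERTa-style double separator)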
    def create_token_type_ids_from_sequences ( self : Any , a : Union[str, Any] , a : List[str] = None ) ->List[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def save_vocabulary ( self : Tuple , a : Any , a : int = None ) ->Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 712 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase :str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] , a : Optional[int] , a : str , a : int=None , a : Optional[Any]=1 ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__ : Dict = tokenizer
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(a ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE__ : Dict = n_copies
def __iter__( self : str ) ->Tuple:
SCREAMING_SNAKE_CASE__ : str = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(a , padding=a , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
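    # Each pass over the dataset yields n_tasks * n_copies examples; "input_len"
    # records the unpadded prompt length so generation can slice off the padding.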
class _a ( lowercase__ ):
"""simple docstring"""
def __init__( self : Dict , a : int , a : int , a : Tuple ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = start_length
SCREAMING_SNAKE_CASE__ : Any = eof_strings
SCREAMING_SNAKE_CASE__ : Any = tokenizer
def __call__( self : Any , a : Optional[int] , a : int , **a : Union[str, Any] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE__ : int = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(a )
def remove_last_block ( string : Dict ):
    '''simple docstring'''
    string_list = re.split("(%s)" % "|".join(__lowercase ) , string )
    # the last element of the split is ""; [:-2] drops it together with the final EOF marker
return "".join(string_list[:-2] )
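# Illustrative behaviour (hypothetical strings): everything from the last EOF marker
# onward is dropped, e.g.
#   remove_last_block("    return a + b\nclass Foo:\n    pass") -> "    return a + b"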
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : str=20 , **_lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE__ : Dict = batch["task_id"].repeat(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Any = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE__ : Dict = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
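# complete_code gathers generations across processes and returns one list of decoded,
# truncated completions per task (n_copies * batch_size candidates each).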
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = HfArgumentParser(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE__ : List[str] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE__ : str = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE__ : Dict = multiprocessing.cpu_count()
    # Load the dataset with `datasets` so it can be fed to Accelerate
SCREAMING_SNAKE_CASE__ : Dict = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE__ : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE__ : str = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE__ : Any = load_metric("code_eval" )
SCREAMING_SNAKE_CASE__ : Dict = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE__ : Dict = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
    # note: args.batch_size is actually num_return_sequences per prompt, not the dataloader batch size
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE__ : int = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Tuple = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for task in tqdm(range(_lowerCamelCase ) ):
SCREAMING_SNAKE_CASE__ : List[Any] = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 26 | 0 |
import requests
from bs4 import BeautifulSoup
def UpperCAmelCase ( _lowerCamelCase : List[Any] = "AAPL" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
SCREAMING_SNAKE_CASE__ : str = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("div" , class_=class_ ).find("span" ).text
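# Note: the CSS class above is tied to Yahoo Finance's current markup; if it changes,
# the first find() returns None and the call chain raises an AttributeError.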
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase :str = {
"configuration_upernet": ["UperNetConfig"],
}
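# The modeling classes depend on torch, so they are only added to the import
# structure when torch is available (mirrored in the TYPE_CHECKING branch below).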
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Union[str, Any] = [
"UperNetForSemanticSegmentation",
"UperNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__lowercase :str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 0 |
def UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple ):
'''simple docstring'''
while b:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = b, a % b
return a
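# e.g. euclidean_gcd(48, 18) == 6: (48, 18) -> (18, 12) -> (12, 6) -> (6, 0)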
def UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict ):
'''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def UpperCAmelCase ( ):
'''simple docstring'''
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 714 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : int , _lowerCamelCase : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ : int = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ : int = [test_file]
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.load_dataset("csv" , data_files=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ : int = features_name.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ : List[str] = {label: i for i, label in enumerate(_lowerCamelCase )}
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Any = {}
if len(_lowerCamelCase ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : List[Any] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" ) , batched=_lowerCamelCase , )
elif len(_lowerCamelCase ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ : Any = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="max_length" , ) , batched=_lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ : int = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ : Tuple = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ : Dict = (
tf.data.Dataset.from_generator(
_lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ : Dict = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
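# get_tfds returns (train_ds, val_ds, test_ds, label2id) built from the given CSV
# files; any split whose file was not provided comes back as None.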
__lowercase :List[Any] = logging.getLogger(__name__)
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class _a :
"""simple docstring"""
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=lowercase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=lowercase__ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=lowercase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_lowerCamelCase ) , labelaid=_lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(_lowerCamelCase : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ : str = TFTrainer(
model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=_lowerCamelCase , eval_dataset=_lowerCamelCase , compute_metrics=_lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE__ : str = trainer.evaluate()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(_lowerCamelCase )
return results
if __name__ == "__main__":
main()
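# Illustrative invocation (script name and argument names assumed from the dataclasses above):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased --label_column_id 0 \
#       --train_file train.csv --dev_file dev.csv \
#       --output_dir ./output --do_train --do_eval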
| 26 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowercase :Tuple = logging.get_logger(__name__)
__lowercase :Tuple = """▁"""
__lowercase :Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowercase :List[str] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
}
}
__lowercase :Dict = {
"""facebook/mbart-large-en-ro""": 1_024,
"""facebook/mbart-large-cc25""": 1_024,
}
# fmt: off
__lowercase :int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
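# Language codes supported by mBART; each one is assigned a dedicated token id
# appended after the SentencePiece vocabulary (see lang_code_to_id in __init__).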
class _a ( __snake_case ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = ['input_ids', 'attention_mask']
snake_case_ = []
snake_case_ = []
def __init__( self : Optional[Any] , a : List[str] , a : List[str]="<s>" , a : Optional[Any]="</s>" , a : Optional[int]="</s>" , a : Tuple="<s>" , a : Optional[int]="<unk>" , a : List[str]="<pad>" , a : Any="<mask>" , a : Union[str, Any]=None , a : Tuple=None , a : List[str]=None , a : Optional[int] = None , a : List[Any]=None , **a : int , ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
SCREAMING_SNAKE_CASE__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , tokenizer_file=A_ , src_lang=A_ , tgt_lang=A_ , additional_special_tokens=A_ , sp_model_kwargs=self.sp_model_kwargs , **A_ , )
SCREAMING_SNAKE_CASE__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
SCREAMING_SNAKE_CASE__ : Optional[int] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : int = len(self.sp_model )
SCREAMING_SNAKE_CASE__ : str = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(A_ )
}
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
SCREAMING_SNAKE_CASE__ : Any = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
SCREAMING_SNAKE_CASE__ : int = src_lang if src_lang is not None else "en_XX"
SCREAMING_SNAKE_CASE__ : str = self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , a : List[str] ) ->List[str]:
SCREAMING_SNAKE_CASE__ : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
SCREAMING_SNAKE_CASE__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def A_ ( self : Optional[int] ) ->int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A_ ( self : str ) ->Any:
return self._src_lang
@src_lang.setter
def A_ ( self : Optional[int] , a : int ) ->Dict:
SCREAMING_SNAKE_CASE__ : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A_ ( self : Optional[int] , a : Dict , a : Any = None , a : Tuple = False ) ->int:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
SCREAMING_SNAKE_CASE__ : int = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def A_ ( self : str , a : Union[str, Any] , a : int = None ) ->Optional[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A_ ( self : Any , a : Tuple , a : Union[str, Any] = None ) ->Any:
SCREAMING_SNAKE_CASE__ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
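    # The method below prepares translation inputs: it encodes the source text and
    # records the target-language token id on the returned encoding.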
def A_ ( self : int , a : Optional[int] , a : List[Any] , a : Optional[int] , a : int , **a : Dict ) ->int:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
SCREAMING_SNAKE_CASE__ : Tuple = src_lang
SCREAMING_SNAKE_CASE__ : Tuple = self(A_ , add_special_tokens=A_ , return_tensors=A_ , **A_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.convert_tokens_to_ids(A_ )
SCREAMING_SNAKE_CASE__ : Dict = tgt_lang_id
return inputs
def A_ ( self : int ) ->Any:
SCREAMING_SNAKE_CASE__ : Tuple = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : List[Any] , a : Optional[Any] ) ->int:
return self.sp_model.encode(A_ , out_type=A_ )
def A_ ( self : Optional[int] , a : Dict ) ->Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A_ ( self : Union[str, Any] , a : int ) ->List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A_ ( self : Tuple , a : Tuple ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = "".join(A_ ).replace(A_ , " " ).strip()
return out_string
def A_ ( self : Union[str, Any] , a : Tuple , a : Dict = None ) ->List[Any]:
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(
A_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , "wb" ) as fi:
SCREAMING_SNAKE_CASE__ : str = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
def A_ ( self : List[str] , a : int , a : Tuple = "en_XX" , a : List[Any] = None , a : Optional[int] = "ro_RO" , **a : str , ) ->Dict:
SCREAMING_SNAKE_CASE__ : str = src_lang
SCREAMING_SNAKE_CASE__ : Dict = tgt_lang
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def A_ ( self : Tuple ) ->Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def A_ ( self : List[str] ) ->Dict:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
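    # mBART formats sequences as "tokens </s> <lang_code>": the two setters below
    # install an empty prefix and an [eos, lang_code] suffix.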
def A_ ( self : Any , a : str ) ->str:
SCREAMING_SNAKE_CASE__ : List[str] = self.lang_code_to_id[src_lang]
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
def A_ ( self : Optional[Any] , a : Optional[int] ) ->int:
SCREAMING_SNAKE_CASE__ : List[Any] = self.lang_code_to_id[lang]
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Dict = [self.eos_token_id, self.cur_lang_code]
| 715 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase :int = logging.get_logger(__name__)
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : int , a : bool = True , a : Optional[Dict[str, int]] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_55 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , **a : List[str] , ) ->None:
super().__init__(**a )
SCREAMING_SNAKE_CASE__ : List[str] = size if size is not None else {"shortest_edge": 2_56}
SCREAMING_SNAKE_CASE__ : Any = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : List[str] = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : int = do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_rescale
SCREAMING_SNAKE_CASE__ : Any = rescale_factor
SCREAMING_SNAKE_CASE__ : int = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A_ ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : Optional[int] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def A_ ( self : List[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , ) ->np.ndarray:
SCREAMING_SNAKE_CASE__ : Tuple = get_size_dict(a )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def A_ ( self : Optional[int] , a : np.ndarray , a : float , a : Optional[Union[str, ChannelDimension]] = None , **a : Dict ) ->np.ndarray:
return rescale(a , scale=a , data_format=a , **a )
def A_ ( self : Union[str, Any] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , ) ->np.ndarray:
return normalize(a , mean=a , std=a , data_format=a , **a )
def A_ ( self : Tuple , a : ImageInput , a : Optional[bool] = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : Dict[str, int] = None , a : Optional[bool] = None , a : Optional[float] = None , a : Optional[bool] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[str, TensorType]] = None , a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a : Any , ) ->Optional[int]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a , default_to_square=a )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(a )
SCREAMING_SNAKE_CASE__ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : Tuple = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Tuple = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE__ : List[Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : List[str] = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = [self.normalize(image=a , mean=a , std=a ) for image in images]
SCREAMING_SNAKE_CASE__ : Dict = [to_channel_dimension_format(a , a ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
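# Illustrative usage (the concrete class name depends on the model this processor
# belongs to; `img` is assumed to be a PIL image):
#   processor = MyImageProcessor()  # hypothetical name
#   batch = processor(images=img, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above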
| 26 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase :int = logging.get_logger(__name__)
def UpperCAmelCase ( _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
SCREAMING_SNAKE_CASE__ : Tuple = DetaConfig(
backbone_config=_lowerCamelCase , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=_lowerCamelCase , with_box_refine=_lowerCamelCase , two_stage=_lowerCamelCase , )
# set labels
SCREAMING_SNAKE_CASE__ : Tuple = "huggingface/label-files"
if "o365" in model_name:
SCREAMING_SNAKE_CASE__ : Any = 366
SCREAMING_SNAKE_CASE__ : Optional[Any] = "object365-id2label.json"
else:
SCREAMING_SNAKE_CASE__ : Dict = 91
SCREAMING_SNAKE_CASE__ : Tuple = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Tuple = idalabel
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in idalabel.items()}
return config
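# Build the (old_key, new_key) pairs that map original DETA checkpoint names to the
# corresponding Hugging Face Transformers parameter names.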
def UpperCAmelCase ( _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dct.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = val
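# The two helpers below split fused attention projections: the Swin backbone stores
# q/k/v as a single qkv matrix, and the decoder self-attention as a single in_proj.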
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : Any = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE__ : List[str] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Dict = in_proj_weight[:hidden_size, :]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[:hidden_size]
SCREAMING_SNAKE_CASE__ : int = in_proj_weight[
hidden_size : hidden_size * 2, :
]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_bias[hidden_size : hidden_size * 2]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_weight[-hidden_size:, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_bias[-hidden_size:]
def UpperCAmelCase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : Optional[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
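# End-to-end conversion: load the original checkpoint, remap and split its weights,
# sanity-check the outputs against hard-coded expected values, then save/push.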
@torch.no_grad()
def UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = get_deta_config(_lowerCamelCase )
# load original state dict
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE__ : List[Any] = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
    # inspect the original state dict (parameter names and shapes)
for name, param in state_dict.items():
print(_lowerCamelCase , param.shape )
# rename keys
SCREAMING_SNAKE_CASE__ : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_swin_q_k_v(_lowerCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_lowerCamelCase , _lowerCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Dict = val
if "input_proj" in key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
SCREAMING_SNAKE_CASE__ : Dict = state_dict.pop(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : int = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE__ : Any = DetaForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
model.to(_lowerCamelCase )
# load image processor
SCREAMING_SNAKE_CASE__ : Any = DetaImageProcessor(format="coco_detection" )
    # verify the conversion on a sample image
SCREAMING_SNAKE_CASE__ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = processor(images=_lowerCamelCase , return_tensors="pt" )
SCREAMING_SNAKE_CASE__ : List[str] = encoding["pixel_values"]
SCREAMING_SNAKE_CASE__ : List[str] = model(pixel_values.to(_lowerCamelCase ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
SCREAMING_SNAKE_CASE__ : str = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_lowerCamelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_lowerCamelCase ) , atol=1E-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
__lowercase :int = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowercase :Optional[Any] = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 716 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self : Dict ) ->List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Dict ) ->Tuple:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : List[Any] = controlnet_params
SCREAMING_SNAKE_CASE__ : Dict = "bird"
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe.prepare_image_inputs([canny_image] * num_samples )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : List[str] = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Dict = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : List[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def A_ ( self : List[Any] ) ->Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = controlnet_params
SCREAMING_SNAKE_CASE__ : Any = "Chef in the kitchen"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
SCREAMING_SNAKE_CASE__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
SCREAMING_SNAKE_CASE__ : str = pipe.prepare_image_inputs([pose_image] * num_samples )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : List[str] = jax.random.split(a , jax.device_count() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = replicate(a )
SCREAMING_SNAKE_CASE__ : Tuple = shard(a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shard(a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 26 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowercase :str = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase :str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=8 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
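# Maps the requested image size to the corresponding latent size, rounding up:
# with the default scale_factor=8, (768, 768) -> (96, 96) and (767, 768) -> (96, 96).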
def prepare_image(pil_image, w: int = 512, h: int = 512):
    """Resize a PIL image and normalize it to a (1, 3, h, w) float tensor in [-1, 1]."""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
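# Hedged self-check for prepare_image (assumes Pillow, numpy and torch are
# importable; a synthetic image avoids any file I/O).
_demo = Image.fromarray(np.full((16, 16, 3), 255, dtype=np.uint8))
_tensor = prepare_image(_demo, w=8, h=8)
assert _tensor.shape == (1, 3, 8, 8)
assert float(_tensor.max()) == 1.0  # 255 / 127.5 - 1 == 1.0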
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : int , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ) ->Union[str, Any]:
super().__init__()
self.register_modules(
unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , movq=lowerCamelCase__ , )
SCREAMING_SNAKE_CASE__ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def A_ ( self : Optional[Any] , a : int , a : Tuple , a : Union[str, Any] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : int = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
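    # Hedged worked example for the trim above: with num_inference_steps=100 and
    # strength=0.2 (the docstring's setting), init_timestep = min(20, 100) = 20
    # and t_start = 80, so only the final 20 scheduler timesteps are denoised --
    # a small strength keeps the result close to the input image.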
def A_ ( self : List[Any] , a : Tuple , a : Dict , a : Optional[Any] , a : Dict , a : Union[str, Any] , a : List[str] , a : Dict=None ) ->Any:
if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" )
SCREAMING_SNAKE_CASE__ : str = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Tuple = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image
else:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase__ )
]
SCREAMING_SNAKE_CASE__ : str = torch.cat(lowerCamelCase__ , dim=0 )
else:
SCREAMING_SNAKE_CASE__ : List[str] = self.movq.encode(lowerCamelCase__ ).latent_dist.sample(lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE__ : int = init_latents.shape
SCREAMING_SNAKE_CASE__ : List[Any] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict = init_latents
return latents
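    # Hedged note on prepare_latents above: the encoded image latents are mixed
    # with fresh Gaussian noise via scheduler.add_noise at the resume timestep,
    # so they start at exactly the noise level where denoising picks up.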
def A_ ( self : Any , a : List[Any]=0 ) ->str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
SCREAMING_SNAKE_CASE__ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE__ : Tuple = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase__ , lowerCamelCase__ )
def A_ ( self : Any , a : Tuple=0 ) ->List[Any]:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
SCREAMING_SNAKE_CASE__ : Dict = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowerCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE__ : Tuple = cpu_offload_with_hook(lowerCamelCase__ , lowerCamelCase__ , prev_module_hook=lowerCamelCase__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE__ : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A_ ( self : Any ) ->Optional[Any]:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase__ )
def __call__( self : str , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_12 , a : int = 5_12 , a : int = 1_00 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ) ->str:
SCREAMING_SNAKE_CASE__ : Any = self._execution_device
SCREAMING_SNAKE_CASE__ : str = guidance_scale > 1.0
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat(lowerCamelCase__ , dim=0 )
SCREAMING_SNAKE_CASE__ : Any = image_embeds.shape[0]
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(lowerCamelCase__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : str = image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = negative_image_embeds.repeat_interleave(lowerCamelCase__ , dim=0 )
SCREAMING_SNAKE_CASE__ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase__ )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = [image]
if not all(isinstance(lowerCamelCase__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(lowerCamelCase__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat([prepare_image(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE__ : int = image.to(dtype=image_embeds.dtype , device=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Dict = self.movq.encode(lowerCamelCase__ )["latents"]
SCREAMING_SNAKE_CASE__ : List[str] = latents.repeat_interleave(lowerCamelCase__ , dim=0 )
self.scheduler.set_timesteps(lowerCamelCase__ , device=lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
SCREAMING_SNAKE_CASE__ : List[str] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = downscale_height_and_width(lowerCamelCase__ , lowerCamelCase__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE__ : List[Any] = self.prepare_latents(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , image_embeds.dtype , lowerCamelCase__ , lowerCamelCase__ )
for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"image_embeds": image_embeds}
SCREAMING_SNAKE_CASE__ : Any = self.unet(
sample=lowerCamelCase__ , timestep=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , added_cond_kwargs=lowerCamelCase__ , return_dict=lowerCamelCase__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : Tuple = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE__ : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
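            # Hedged numeric illustration of the guidance update above: with
            # guidance_scale = 4.0, uncond = 0.1 and text = 0.3 give
            # 0.1 + 4.0 * (0.3 - 0.1) = 0.9, pushing the prediction past the
            # text-conditioned estimate before the variance is re-attached.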
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE__ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , generator=lowerCamelCase__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.movq.decode(lowerCamelCase__ , force_not_quantize=lowerCamelCase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE__ : Tuple = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ : List[Any] = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : Tuple = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
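# --- Hedged usage sketch for the img2img pipeline above ----------------------
# Mirrors the EXAMPLE docstring embedded earlier in this file; the checkpoint
# ids are the public kandinsky-community ones and a CUDA device is assumed.
import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
image_emb, zero_image_emb = pipe_prior("A red cartoon frog, 4k", return_dict=False)

pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/frog.png"
)
image = pipe(
    image=init_image,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    height=768,
    width=768,
    num_inference_steps=100,
    strength=0.2,
).images[0]
image.save("red_frog.png")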
| 717 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # pytest exits with code 5 when no tests are collected; treat that as success
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
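# --- Hedged example of the IGNORE_RESULT flag registered above ---------------
# Once the custom OutputChecker is installed, a doctest can opt out of output
# comparison entirely; the helper below is purely illustrative.
def _noisy_helper():
    """
    >>> _noisy_helper()  # doctest: +IGNORE_RESULT
    'whatever this printed last time'
    """
    return "actual, possibly changing, value"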
| 26 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__lowercase :Any = logging.get_logger(__name__)
__lowercase :str = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _a ( __a ):
"""simple docstring"""
snake_case_ = "blenderbot-small"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , a : Dict=5_02_65 , a : Any=5_12 , a : int=8 , a : Optional[int]=20_48 , a : Tuple=16 , a : Optional[Any]=8 , a : Optional[int]=20_48 , a : str=16 , a : Optional[Any]=0.0 , a : int=0.0 , a : str=True , a : int=True , a : Dict="gelu" , a : Any=5_12 , a : str=0.1 , a : int=0.0 , a : str=0.0 , a : Union[str, Any]=0.02 , a : Dict=1 , a : Union[str, Any]=False , a : Tuple=0 , a : List[Any]=1 , a : Union[str, Any]=2 , a : int=2 , **a : Union[str, Any] , ) ->List[Any]:
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Dict = d_model
SCREAMING_SNAKE_CASE__ : Dict = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE__ : Any = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Union[str, Any] = decoder_layers
SCREAMING_SNAKE_CASE__ : Dict = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = dropout
SCREAMING_SNAKE_CASE__ : List[str] = attention_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : Any = activation_function
SCREAMING_SNAKE_CASE__ : Optional[int] = init_std
SCREAMING_SNAKE_CASE__ : str = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Dict = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , )
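# --- Hedged usage sketch for the config above ---------------------------------
# Upstream this class is transformers.BlenderbotSmallConfig; the attribute_map
# defined on the class aliases hidden_size -> d_model and
# num_attention_heads -> encoder_attention_heads.
from transformers import BlenderbotSmallConfig

_cfg = BlenderbotSmallConfig()
assert _cfg.hidden_size == _cfg.d_model == 512
assert _cfg.num_attention_heads == _cfg.encoder_attention_heads == 16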
class _a ( __a ):
"""simple docstring"""
@property
def A_ ( self : Tuple ) ->Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Dict = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ : Dict = {0: "batch"}
SCREAMING_SNAKE_CASE__ : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE__ : Tuple = {0: "batch", 1: "decoder_sequence"}
SCREAMING_SNAKE_CASE__ : List[str] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : int = self.num_layers
for i in range(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ : str = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def A_ ( self : List[Any] ) ->Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
else:
SCREAMING_SNAKE_CASE__ : List[Any] = super(lowerCAmelCase_ , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_layers
for i in range(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ : Any = {0: "batch", 2: "past_sequence + sequence"}
SCREAMING_SNAKE_CASE__ : Any = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def A_ ( self : Dict , a : List[str] , a : str = -1 , a : List[str] = -1 , a : Optional[int] = False , a : List[Any] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Generate decoder inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[Any] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE__ : str = dict(**lowerCAmelCase_ , **lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Union[str, Any] = common_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE__ : Any = common_inputs["decoder_input_ids"].shape[1]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Dict = decoder_seq_length + 3
SCREAMING_SNAKE_CASE__ : Optional[int] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = self.num_layers
SCREAMING_SNAKE_CASE__ : List[Any] = min(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : Tuple = max(lowerCAmelCase_ , lowerCAmelCase_ ) - min_num_layers
SCREAMING_SNAKE_CASE__ : int = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCAmelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
torch.zeros(lowerCAmelCase_ ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE__ : Dict = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCAmelCase_ , lowerCAmelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) )
return common_inputs
def A_ ( self : Optional[Any] , a : List[Any] , a : str = -1 , a : List[str] = -1 , a : List[Any] = False , a : Dict = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Optional[Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : List[str] = self.num_layers
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : List[str] = common_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCAmelCase_ , lowerCAmelCase_ , dtype=lowerCAmelCase_ )] , dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [
(torch.zeros(lowerCAmelCase_ ), torch.zeros(lowerCAmelCase_ )) for _ in range(lowerCAmelCase_ )
]
return common_inputs
def A_ ( self : Optional[int] , a : List[Any] , a : List[str] = -1 , a : Union[str, Any] = -1 , a : Union[str, Any] = False , a : List[Any] = None , ) ->Mapping[str, Any]:
SCREAMING_SNAKE_CASE__ : int = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.num_special_tokens_to_add(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ : List[str] = compute_effective_axis_dimension(
lowerCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCAmelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : Tuple = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : int = dict(tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) )
return common_inputs
def A_ ( self : Dict , a : Tuple , a : int = -1 , a : List[Any] = -1 , a : Optional[int] = False , a : Optional[Any] = None , ) ->Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE__ : Dict = self._generate_dummy_inputs_for_causal_lm(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , seq_length=lowerCAmelCase_ , is_pair=lowerCAmelCase_ , framework=lowerCAmelCase_ )
return common_inputs
def A_ ( self : Optional[int] , a : Optional[int] , a : Optional[Any] , a : List[Any] , a : str ) ->Tuple:
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : int = super()._flatten_past_key_values_(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ : Tuple = super(lowerCAmelCase_ , self )._flatten_past_key_values_(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
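# --- Hedged sketch: driving an ONNX export with the config above --------------
# Uses the legacy transformers.onnx entry point; the upstream class name
# (BlenderbotSmallOnnxConfig), its import path, and the export signature are
# assumptions that may vary across transformers versions.
from pathlib import Path

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.models.blenderbot_small import BlenderbotSmallOnnxConfig
from transformers.onnx import export

_model_id = "facebook/blenderbot_small-90M"
_tokenizer = AutoTokenizer.from_pretrained(_model_id)
_model = AutoModelForSeq2SeqLM.from_pretrained(_model_id)
_onnx_config = BlenderbotSmallOnnxConfig(_model.config, task="seq2seq-lm")
export(_tokenizer, _model, _onnx_config, _onnx_config.default_onnx_opset, Path("bbs.onnx"))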
| 718 |
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c over Pythagorean triplets with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"{solution() = }")
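# Hedged check (Project Euler problem 9): for n = 1000 the unique Pythagorean
# triplet with a + b + c = 1000 is (200, 375, 425), so the product is 31875000.
assert solution() == 200 * 375 * 425 == 31875000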
| 26 | 0 |